diff --git a/src/api/appservice_server.rs b/src/api/appservice_server.rs
index 276a1c57..98097d22 100644
--- a/src/api/appservice_server.rs
+++ b/src/api/appservice_server.rs
@@ -80,10 +80,11 @@ where
             .expect("http::response::Builder is usable"),
     );
 
+    // TODO: handle timeout
     let body = response.bytes().await.unwrap_or_else(|e| {
         warn!("server error: {}", e);
         Vec::new().into()
-    }); // TODO: handle timeout
+    });
 
     if status != 200 {
         warn!(
diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs
index 8a9191d1..06c06dcb 100644
--- a/src/api/client_server/keys.rs
+++ b/src/api/client_server/keys.rs
@@ -138,7 +138,8 @@ pub(crate) async fn upload_signing_keys_route(
             master_key,
             &body.self_signing_key,
             &body.user_signing_key,
-            true, // notify so that other users see the new keys
+            // notify so that other users see the new keys
+            true,
         )?;
     }
 
@@ -196,7 +197,8 @@ pub(crate) async fn upload_signatures_route(
     }
 
     Ok(upload_signatures::v3::Response {
-        failures: BTreeMap::new(), // TODO: integrate
+        // TODO: integrate
+        failures: BTreeMap::new(),
     })
 }
 
@@ -252,7 +254,8 @@ pub(crate) async fn get_key_changes_route(
     }
     Ok(get_key_changes::v3::Response {
         changed: device_list_updates.into_iter().collect(),
-        left: Vec::new(), // TODO
+        // TODO
+        left: Vec::new(),
     })
 }
 
@@ -422,7 +425,8 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
             let raw = serde_json::from_value(json).expect("Raw::from_value always works");
             services().users.add_cross_signing_keys(
                 &user, &raw, &None, &None,
-                false, // Dont notify. A notification would trigger another key request resulting in an endless loop
+                // Don't notify. A notification would trigger another key request resulting in an endless loop
+                false,
             )?;
             master_keys.insert(user, raw);
         }
diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs
index 005ce01d..be3d59fa 100644
--- a/src/api/client_server/membership.rs
+++ b/src/api/client_server/membership.rs
@@ -49,7 +49,8 @@ pub(crate) async fn join_room_by_id_route(
 ) -> Result<join_room_by_id::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
 
-    let mut servers = Vec::new(); // There is no body.server_name for /roomId/join
+    // There is no body.server_name for /roomId/join
+    let mut servers = Vec::new();
     servers.extend(
         services()
             .rooms
@@ -1012,7 +1013,8 @@ async fn join_room_by_id_helper(
         let authenticated = state_res::event_auth::auth_check(
             &state_res::RoomVersion::new(&room_version_id).expect("room version is supported"),
             &parsed_join_pdu,
-            None::<PduEvent>, // TODO: third party invite
+            // TODO: third party invite
+            None::<PduEvent>,
             |k, s| {
                 services()
                     .rooms
diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs
index 1b266e25..b34f04bc 100644
--- a/src/api/client_server/room.rs
+++ b/src/api/client_server/room.rs
@@ -163,7 +163,8 @@ pub(crate) async fn create_room_route(
                 })?,
             );
         }
-        RoomVersionId::V11 => {} // V11 removed the "creator" key
+        // V11 removed the "creator" key
+        RoomVersionId::V11 => {}
         _ => unreachable!("Validity of room version already checked"),
     }
 
@@ -790,7 +791,8 @@ pub(crate) async fn upgrade_room_route(
             .room_state_get(&body.room_id, &event_type, "")?
         {
             Some(v) => v.content.clone(),
-            None => continue, // Skipping missing events.
+            // Skipping missing events.
+            None => continue,
         };
 
         services()
diff --git a/src/api/client_server/search.rs b/src/api/client_server/search.rs
index 9d69d3b8..2b6c9f12 100644
--- a/src/api/client_server/search.rs
+++ b/src/api/client_server/search.rs
@@ -74,7 +74,8 @@ pub(crate) async fn search_events_route(
                 "Invalid next_batch token.",
             ))
         }
-        None => 0, // Default to the start
+        // Default to the start
+        None => 0,
     };
 
     let mut results = Vec::new();
@@ -133,10 +134,12 @@ pub(crate) async fn search_events_route(
     Ok(search_events::v3::Response::new(ResultCategories {
         room_events: ResultRoomEvents {
             count: None,
-            groups: BTreeMap::new(), // TODO
+            // TODO
+            groups: BTreeMap::new(),
             next_batch,
             results,
-            state: BTreeMap::new(), // TODO
+            // TODO
+            state: BTreeMap::new(),
             highlights: search_criteria
                 .search_term
                 .split_terminator(|c: char| !c.is_alphanumeric())
diff --git a/src/api/client_server/state.rs b/src/api/client_server/state.rs
index 8ac33241..dda83e10 100644
--- a/src/api/client_server/state.rs
+++ b/src/api/client_server/state.rs
@@ -30,7 +30,8 @@ pub(crate) async fn send_state_event_for_key_route(
         sender_user,
         &body.room_id,
         &body.event_type,
-        &body.body.body, // Yes, I hate it too
+        // Yes, I hate it too
+        &body.body.body,
         body.state_key.clone(),
     )
     .await?;
@@ -210,7 +211,8 @@ async fn send_state_event_for_key_helper(
                 .rooms
                 .alias
                 .resolve_local_alias(&alias)?
-                .filter(|room| room == room_id) // Make sure it's the right room
+                // Make sure it's the right room
+                .filter(|room| room == room_id)
                 .is_none()
             {
                 return Err(Error::BadRequest(
diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs
index d787769c..9f7a2626 100644
--- a/src/api/client_server/sync.rs
+++ b/src/api/client_server/sync.rs
@@ -209,7 +209,8 @@ async fn sync_helper(
         .unwrap_or(0);
     let sincecount = PduCount::Normal(since);
 
-    let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in
+    // Users that have left any encrypted rooms the sender was in
+    let mut left_encrypted_users = HashSet::new();
     let mut device_list_updates = HashSet::new();
     let mut device_list_left = HashSet::new();
 
@@ -492,7 +493,8 @@ async fn sync_helper(
             leave: left_rooms,
             join: joined_rooms,
             invite: invited_rooms,
-            knock: BTreeMap::new(), // TODO
+            // TODO
+            knock: BTreeMap::new(),
         },
         presence: Presence::default(),
         account_data: GlobalAccountData {
@@ -543,7 +545,8 @@ async fn sync_helper(
         };
         Ok((response, false))
     } else {
-        Ok((response, since != next_batch)) // Only cache if we made progress
+        // Only cache if we made progress
+        Ok((response, since != next_batch))
     }
 }
 
@@ -1201,7 +1204,8 @@ pub(crate) async fn sync_events_v4_route(
             .remove_to_device_events(&sender_user, &sender_device, globalsince)?;
     }
 
-    let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in
+    // Users that have left any encrypted rooms the sender was in
+    let mut left_encrypted_users = HashSet::new();
     let mut device_list_changes = HashSet::new();
     let mut device_list_left = HashSet::new();
 
@@ -1381,7 +1385,8 @@ pub(crate) async fn sync_events_v4_route(
     }
 
     let mut lists = BTreeMap::new();
-    let mut todo_rooms = BTreeMap::new(); // and required state
+    // and required state
+    let mut todo_rooms = BTreeMap::new();
 
     for (list_id, list) in body.lists {
         if list.filters.and_then(|f| f.is_invite).unwrap_or(false) {
@@ -1646,7 +1651,8 @@ pub(crate) async fn sync_events_v4_route(
                             .map(UInt::new_saturating)
                             .unwrap_or(uint!(0)),
                     ),
-                    num_live: None, // Count events in timeline greater than global sync counter
+                    // Count events in timeline greater than global sync counter
+                    num_live: None,
                     timestamp: None,
                 },
             );
diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs
index 22931cf2..55485023 100644
--- a/src/api/ruma_wrapper/axum.rs
+++ b/src/api/ruma_wrapper/axum.rs
@@ -334,7 +334,8 @@ where
 
 struct XMatrix {
     origin: OwnedServerName,
-    key: String, // KeyName?
+    // KeyName?
+    key: String,
     sig: String,
 }
 
diff --git a/src/api/server_server.rs b/src/api/server_server.rs
index 7b0e9afe..86c34276 100644
--- a/src/api/server_server.rs
+++ b/src/api/server_server.rs
@@ -260,10 +260,11 @@ where
     );
 
     debug!("Getting response bytes from {destination}");
+    // TODO: handle timeout
     let body = response.bytes().await.unwrap_or_else(|e| {
         warn!("server error {}", e);
         Vec::new().into()
-    }); // TODO: handle timeout
+    });
     debug!("Got response bytes from {destination}");
 
     if status != 200 {
@@ -1555,7 +1556,8 @@ async fn create_join_event(
             .filter_map(|(_, id)| services().rooms.timeline.get_pdu_json(id).ok().flatten())
             .map(PduEvent::convert_to_outgoing_federation_event)
             .collect(),
-        event: None, // TODO: handle restricted joins
+        // TODO: handle restricted joins
+        event: None,
     })
 }
 
diff --git a/src/config.rs b/src/config.rs
index 2313b92f..0f9a9c9c 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -239,11 +239,13 @@ fn default_pdu_cache_capacity() -> u32 {
 }
 
 fn default_cleanup_second_interval() -> u32 {
-    60 // every minute
+    // every minute
+    60
 }
 
 fn default_max_request_size() -> u32 {
-    20 * 1024 * 1024 // Default to 20 MB
+    // Default to 20 MB
+    20 * 1024 * 1024
 }
 
 fn default_max_concurrent_requests() -> u16 {
diff --git a/src/config/proxy.rs b/src/config/proxy.rs
index 2adab9f6..91ada136 100644
--- a/src/config/proxy.rs
+++ b/src/config/proxy.rs
@@ -45,7 +45,8 @@ impl ProxyConfig {
             ProxyConfig::None => None,
             ProxyConfig::Global { url } => Some(Proxy::all(url)?),
             ProxyConfig::ByDomain(proxies) => Some(Proxy::custom(move |url| {
-                proxies.iter().find_map(|proxy| proxy.for_url(url)).cloned() // first matching proxy
+                // first matching proxy
+                proxies.iter().find_map(|proxy| proxy.for_url(url)).cloned()
             })),
         })
     }
@@ -63,8 +64,10 @@ pub(crate) struct PartialProxyConfig {
 
 impl PartialProxyConfig {
     pub(crate) fn for_url(&self, url: &Url) -> Option<&Url> {
         let domain = url.domain()?;
-        let mut included_because = None; // most specific reason it was included
-        let mut excluded_because = None; // most specific reason it was excluded
+        // most specific reason it was included
+        let mut included_because = None;
+        // most specific reason it was excluded
+        let mut excluded_because = None;
         if self.include.is_empty() {
             // treat empty include list as `*`
             included_because = Some(&WildCardedDomain::WildCard);
@@ -86,7 +89,8 @@ impl PartialProxyConfig {
             }
         }
         match (included_because, excluded_because) {
-            (Some(a), Some(b)) if a.more_specific_than(b) => Some(&self.url), // included for a more specific reason than excluded
+            // included for a more specific reason than excluded
+            (Some(a), Some(b)) if a.more_specific_than(b) => Some(&self.url),
             (Some(_), None) => Some(&self.url),
             _ => None,
         }
diff --git a/src/database.rs b/src/database.rs
index 94de39c9..ea964a52 100644
--- a/src/database.rs
+++ b/src/database.rs
@@ -42,77 +42,122 @@ pub(crate) struct KeyValueDatabase {
     pub(super) userid_avatarurl: Arc<dyn KvTree>,
     pub(super) userid_blurhash: Arc<dyn KvTree>,
     pub(super) userdeviceid_token: Arc<dyn KvTree>,
-    pub(super) userdeviceid_metadata: Arc<dyn KvTree>, // This is also used to check if a device exists
-    pub(super) userid_devicelistversion: Arc<dyn KvTree>, // DevicelistVersion = u64
+
+    // This is also used to check if a device exists
+    pub(super) userdeviceid_metadata: Arc<dyn KvTree>,
+
+    // DevicelistVersion = u64
+    pub(super) userid_devicelistversion: Arc<dyn KvTree>,
     pub(super) token_userdeviceid: Arc<dyn KvTree>,
 
-    pub(super) onetimekeyid_onetimekeys: Arc<dyn KvTree>, // OneTimeKeyId = UserId + DeviceKeyId
-    pub(super) userid_lastonetimekeyupdate: Arc<dyn KvTree>, // LastOneTimeKeyUpdate = Count
-    pub(super) keychangeid_userid: Arc<dyn KvTree>, // KeyChangeId = UserId/RoomId + Count
-    pub(super) keyid_key: Arc<dyn KvTree>, // KeyId = UserId + KeyId (depends on key type)
+    // OneTimeKeyId = UserId + DeviceKeyId
+    pub(super) onetimekeyid_onetimekeys: Arc<dyn KvTree>,
+
+    // LastOneTimeKeyUpdate = Count
+    pub(super) userid_lastonetimekeyupdate: Arc<dyn KvTree>,
+
+    // KeyChangeId = UserId/RoomId + Count
+    pub(super) keychangeid_userid: Arc<dyn KvTree>,
+
+    // KeyId = UserId + KeyId (depends on key type)
+    pub(super) keyid_key: Arc<dyn KvTree>,
     pub(super) userid_masterkeyid: Arc<dyn KvTree>,
     pub(super) userid_selfsigningkeyid: Arc<dyn KvTree>,
     pub(super) userid_usersigningkeyid: Arc<dyn KvTree>,
 
-    pub(super) userfilterid_filter: Arc<dyn KvTree>, // UserFilterId = UserId + FilterId
+    // UserFilterId = UserId + FilterId
+    pub(super) userfilterid_filter: Arc<dyn KvTree>,
 
-    pub(super) todeviceid_events: Arc<dyn KvTree>, // ToDeviceId = UserId + DeviceId + Count
+    // ToDeviceId = UserId + DeviceId + Count
+    pub(super) todeviceid_events: Arc<dyn KvTree>,
 
     // Trees "owned" by `self::key_value::uiaa`
-    pub(super) userdevicesessionid_uiaainfo: Arc<dyn KvTree>, // User-interactive authentication
+    // User-interactive authentication
+    pub(super) userdevicesessionid_uiaainfo: Arc<dyn KvTree>,
     pub(super) userdevicesessionid_uiaarequest: RwLock<BTreeMap<Vec<u8>, CanonicalJsonValue>>,
 
     // Trees "owned" by `self::key_value::rooms::edus`
-    pub(super) readreceiptid_readreceipt: Arc<dyn KvTree>, // ReadReceiptId = RoomId + Count + UserId
-    pub(super) roomuserid_privateread: Arc<dyn KvTree>, // RoomUserId = Room + User, PrivateRead = Count
-    pub(super) roomuserid_lastprivatereadupdate: Arc<dyn KvTree>, // LastPrivateReadUpdate = Count
+    // ReadReceiptId = RoomId + Count + UserId
+    pub(super) readreceiptid_readreceipt: Arc<dyn KvTree>,
+
+    // RoomUserId = Room + User, PrivateRead = Count
+    pub(super) roomuserid_privateread: Arc<dyn KvTree>,
+
+    // LastPrivateReadUpdate = Count
+    pub(super) roomuserid_lastprivatereadupdate: Arc<dyn KvTree>,
+
+    // PresenceId = RoomId + Count + UserId
     // This exists in the database already but is currently unused
     #[allow(dead_code)]
-    pub(super) presenceid_presence: Arc<dyn KvTree>, // PresenceId = RoomId + Count + UserId
+    pub(super) presenceid_presence: Arc<dyn KvTree>,
+
+    // LastPresenceUpdate = Count
     // This exists in the database already but is currently unused
     #[allow(dead_code)]
-    pub(super) userid_lastpresenceupdate: Arc<dyn KvTree>, // LastPresenceUpdate = Count
+    pub(super) userid_lastpresenceupdate: Arc<dyn KvTree>,
 
     // Trees "owned" by `self::key_value::rooms`
-    pub(super) pduid_pdu: Arc<dyn KvTree>, // PduId = ShortRoomId + Count
+    // PduId = ShortRoomId + Count
+    pub(super) pduid_pdu: Arc<dyn KvTree>,
     pub(super) eventid_pduid: Arc<dyn KvTree>,
     pub(super) roomid_pduleaves: Arc<dyn KvTree>,
     pub(super) alias_roomid: Arc<dyn KvTree>,
-    pub(super) aliasid_alias: Arc<dyn KvTree>, // AliasId = RoomId + Count
+
+    // AliasId = RoomId + Count
+    pub(super) aliasid_alias: Arc<dyn KvTree>,
     pub(super) publicroomids: Arc<dyn KvTree>,
 
-    pub(super) threadid_userids: Arc<dyn KvTree>, // ThreadId = RoomId + Count
+    // ThreadId = RoomId + Count
+    pub(super) threadid_userids: Arc<dyn KvTree>,
 
-    pub(super) tokenids: Arc<dyn KvTree>, // TokenId = ShortRoomId + Token + PduIdCount
+    // TokenId = ShortRoomId + Token + PduIdCount
+    pub(super) tokenids: Arc<dyn KvTree>,
 
     /// Participating servers in a room.
-    pub(super) roomserverids: Arc<dyn KvTree>, // RoomServerId = RoomId + ServerName
-    pub(super) serverroomids: Arc<dyn KvTree>, // ServerRoomId = ServerName + RoomId
+    // RoomServerId = RoomId + ServerName
+    pub(super) roomserverids: Arc<dyn KvTree>,
+
+    // ServerRoomId = ServerName + RoomId
+    pub(super) serverroomids: Arc<dyn KvTree>,
     pub(super) userroomid_joined: Arc<dyn KvTree>,
     pub(super) roomuserid_joined: Arc<dyn KvTree>,
     pub(super) roomid_joinedcount: Arc<dyn KvTree>,
     pub(super) roomid_invitedcount: Arc<dyn KvTree>,
     pub(super) roomuseroncejoinedids: Arc<dyn KvTree>,
-    pub(super) userroomid_invitestate: Arc<dyn KvTree>, // InviteState = Vec<Raw<AnyStrippedStateEvent>>
-    pub(super) roomuserid_invitecount: Arc<dyn KvTree>, // InviteCount = Count
+
+    // InviteState = Vec<Raw<AnyStrippedStateEvent>>
+    pub(super) userroomid_invitestate: Arc<dyn KvTree>,
+
+    // InviteCount = Count
+    pub(super) roomuserid_invitecount: Arc<dyn KvTree>,
     pub(super) userroomid_leftstate: Arc<dyn KvTree>,
     pub(super) roomuserid_leftcount: Arc<dyn KvTree>,
 
-    pub(super) disabledroomids: Arc<dyn KvTree>, // Rooms where incoming federation handling is disabled
+    // Rooms where incoming federation handling is disabled
+    pub(super) disabledroomids: Arc<dyn KvTree>,
 
-    pub(super) lazyloadedids: Arc<dyn KvTree>, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId
+    // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId
+    pub(super) lazyloadedids: Arc<dyn KvTree>,
 
-    pub(super) userroomid_notificationcount: Arc<dyn KvTree>, // NotifyCount = u64
-    pub(super) userroomid_highlightcount: Arc<dyn KvTree>, // HightlightCount = u64
-    pub(super) roomuserid_lastnotificationread: Arc<dyn KvTree>, // LastNotificationRead = u64
+    // NotifyCount = u64
+    pub(super) userroomid_notificationcount: Arc<dyn KvTree>,
+
+    // HighlightCount = u64
+    pub(super) userroomid_highlightcount: Arc<dyn KvTree>,
+
+    // LastNotificationRead = u64
+    pub(super) roomuserid_lastnotificationread: Arc<dyn KvTree>,
 
     /// Remember the current state hash of a room.
     pub(super) roomid_shortstatehash: Arc<dyn KvTree>,
+
     pub(super) roomsynctoken_shortstatehash: Arc<dyn KvTree>,
+
     /// Remember the state hash at events in the past.
     pub(super) shorteventid_shortstatehash: Arc<dyn KvTree>,
+
     /// StateKey = EventType + StateKey, ShortStateKey = Count
     pub(super) statekey_shortstatekey: Arc<dyn KvTree>,
     pub(super) shortstatekey_statekey: Arc<dyn KvTree>,
@@ -123,7 +168,9 @@ pub(crate) struct KeyValueDatabase {
     pub(super) eventid_shorteventid: Arc<dyn KvTree>,
     pub(super) statehash_shortstatehash: Arc<dyn KvTree>,
-    pub(super) shortstatehash_statediff: Arc<dyn KvTree>, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--)
+
+    // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--)
+    pub(super) shortstatehash_statediff: Arc<dyn KvTree>,
 
     pub(super) shorteventid_authchain: Arc<dyn KvTree>,
 
@@ -134,26 +181,44 @@ pub(crate) struct KeyValueDatabase {
     /// ShortEventId + ShortEventId -> ().
     pub(super) tofrom_relation: Arc<dyn KvTree>,
+
     /// RoomId + EventId -> Parent PDU EventId.
     pub(super) referencedevents: Arc<dyn KvTree>,
 
     // Trees "owned" by `self::key_value::account_data`
-    pub(super) roomuserdataid_accountdata: Arc<dyn KvTree>, // RoomUserDataId = Room + User + Count + Type
-    pub(super) roomusertype_roomuserdataid: Arc<dyn KvTree>, // RoomUserType = Room + User + Type
+    // RoomUserDataId = Room + User + Count + Type
+    pub(super) roomuserdataid_accountdata: Arc<dyn KvTree>,
+
+    // RoomUserType = Room + User + Type
+    pub(super) roomusertype_roomuserdataid: Arc<dyn KvTree>,
 
     // Trees "owned" by `self::key_value::media`
-    pub(super) mediaid_file: Arc<dyn KvTree>, // MediaId = MXC + WidthHeight + ContentDisposition + ContentType
+    // MediaId = MXC + WidthHeight + ContentDisposition + ContentType
+    pub(super) mediaid_file: Arc<dyn KvTree>,
+
     // Trees "owned" by `self::key_value::key_backups`
-    pub(super) backupid_algorithm: Arc<dyn KvTree>, // BackupId = UserId + Version(Count)
-    pub(super) backupid_etag: Arc<dyn KvTree>, // BackupId = UserId + Version(Count)
-    pub(super) backupkeyid_backup: Arc<dyn KvTree>, // BackupKeyId = UserId + Version + RoomId + SessionId
+    // BackupId = UserId + Version(Count)
+    pub(super) backupid_algorithm: Arc<dyn KvTree>,
+
+    // BackupId = UserId + Version(Count)
+    pub(super) backupid_etag: Arc<dyn KvTree>,
+
+    // BackupKeyId = UserId + Version + RoomId + SessionId
+    pub(super) backupkeyid_backup: Arc<dyn KvTree>,
 
     // Trees "owned" by `self::key_value::transaction_ids`
-    pub(super) userdevicetxnid_response: Arc<dyn KvTree>, // Response can be empty (/sendToDevice) or the event id (/send)
+    // Response can be empty (/sendToDevice) or the event id (/send)
+    pub(super) userdevicetxnid_response: Arc<dyn KvTree>,
+
     // Trees "owned" by `self::key_value::sending`
-    pub(super) servername_educount: Arc<dyn KvTree>, // EduCount: Count of last EDU sync
-    pub(super) servernameevent_data: Arc<dyn KvTree>, // ServernameEvent = (+ / $)SenderKey / ServerName / UserId + PduId / Id (for edus), Data = EDU content
-    pub(super) servercurrentevent_data: Arc<dyn KvTree>, // ServerCurrentEvents = (+ / $)ServerName / UserId + PduId / Id (for edus), Data = EDU content
+    // EduCount: Count of last EDU sync
+    pub(super) servername_educount: Arc<dyn KvTree>,
+
+    // ServernameEvent = (+ / $)SenderKey / ServerName / UserId + PduId / Id (for edus), Data = EDU content
+    pub(super) servernameevent_data: Arc<dyn KvTree>,
+
+    // ServerCurrentEvents = (+ / $)ServerName / UserId + PduId / Id (for edus), Data = EDU content
+    pub(super) servercurrentevent_data: Arc<dyn KvTree>,
 
     // Trees "owned" by `self::key_value::appservice`
     pub(super) id_appserviceregistrations: Arc<dyn KvTree>,
@@ -278,7 +343,8 @@ impl KeyValueDatabase {
             userdevicesessionid_uiaainfo: builder.open_tree("userdevicesessionid_uiaainfo")?,
             userdevicesessionid_uiaarequest: RwLock::new(BTreeMap::new()),
             readreceiptid_readreceipt: builder.open_tree("readreceiptid_readreceipt")?,
-            roomuserid_privateread: builder.open_tree("roomuserid_privateread")?, // "Private" read receipt
+            // "Private" read receipt
+            roomuserid_privateread: builder.open_tree("roomuserid_privateread")?,
             roomuserid_lastprivatereadupdate: builder
                 .open_tree("roomuserid_lastprivatereadupdate")?,
             presenceid_presence: builder.open_tree("presenceid_presence")?,
@@ -601,7 +667,8 @@ impl KeyValueDatabase {
                     current_sstatehash,
                     Arc::new(statediffnew),
                     Arc::new(statediffremoved),
-                    2, // every state change is 2 event changes on average
+                    // every state change is 2 event changes on average
+                    2,
                     states_parents,
                 )?;
 
diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs
index 7609698a..1dc8a47d 100644
--- a/src/database/abstraction/sqlite.rs
+++ b/src/database/abstraction/sqlite.rs
@@ -275,7 +275,8 @@ impl KvTree for SqliteTable {
         backwards: bool,
     ) -> Box<dyn Iterator<Item = TupleOfBytes> + 'a> {
         let guard = self.engine.read_lock_iterator();
-        let from = from.to_vec(); // TODO change interface?
+        // TODO change interface?
+        let from = from.to_vec();
 
         if backwards {
             let statement = Box::leak(Box::new(
diff --git a/src/database/key_value/rooms/edus/read_receipt.rs b/src/database/key_value/rooms/edus/read_receipt.rs
index 87c4e6f0..5b5b54aa 100644
--- a/src/database/key_value/rooms/edus/read_receipt.rs
+++ b/src/database/key_value/rooms/edus/read_receipt.rs
@@ -68,7 +68,8 @@ impl service::rooms::edus::read_receipt::Data for KeyValueDatabase {
         let prefix2 = prefix.clone();
 
         let mut first_possible_edu = prefix.clone();
-        first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since
+        // +1 so we don't send the event at since
+        first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes());
 
         Box::new(
             self.readreceiptid_readreceipt
diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs
index 0154d1a1..a2eba329 100644
--- a/src/database/key_value/rooms/search.rs
+++ b/src/database/key_value/rooms/search.rs
@@ -14,7 +14,8 @@ impl service::rooms::search::Data for KeyValueDatabase {
             let mut key = shortroomid.to_be_bytes().to_vec();
             key.extend_from_slice(word.as_bytes());
             key.push(0xff);
-            key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here
+            // TODO: currently we save the room id a second time here
+            key.extend_from_slice(pdu_id);
             (key, Vec::new())
         });
 
@@ -52,7 +53,8 @@ impl service::rooms::search::Data for KeyValueDatabase {
             last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes());
 
             self.tokenids
-                .iter_from(&last_possible_id, true) // Newest pdus first
+                // Newest pdus first
+                .iter_from(&last_possible_id, true)
                 .take_while(move |(k, _)| k.starts_with(&prefix2))
                 .map(move |(key, _)| key[prefix3.len()..].to_vec())
         });
diff --git a/src/database/key_value/rooms/state.rs b/src/database/key_value/rooms/state.rs
index fd0c81e6..c7e042d2 100644
--- a/src/database/key_value/rooms/state.rs
+++ b/src/database/key_value/rooms/state.rs
@@ -21,7 +21,8 @@ impl service::rooms::state::Data for KeyValueDatabase {
         &self,
         room_id: &RoomId,
         new_shortstatehash: u64,
-        _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+        // Take mutex guard to make sure users get the room state mutex
+        _mutex_lock: &MutexGuard<'_, ()>,
     ) -> Result<()> {
         self.roomid_shortstatehash
             .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?;
@@ -53,7 +54,8 @@ impl service::rooms::state::Data for KeyValueDatabase {
         &self,
         room_id: &RoomId,
         event_ids: Vec<OwnedEventId>,
-        _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+        // Take mutex guard to make sure users get the room state mutex
+        _mutex_lock: &MutexGuard<'_, ()>,
     ) -> Result<()> {
         let mut prefix = room_id.as_bytes().to_vec();
         prefix.push(0xff);
diff --git a/src/database/key_value/rooms/state_cache.rs b/src/database/key_value/rooms/state_cache.rs
index be085b5c..a1a5025a 100644
--- a/src/database/key_value/rooms/state_cache.rs
+++ b/src/database/key_value/rooms/state_cache.rs
@@ -79,10 +79,11 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
         userroom_id.push(0xff);
         userroom_id.extend_from_slice(room_id.as_bytes());
 
+        // TODO
         self.userroomid_leftstate.insert(
             &userroom_id,
             &serde_json::to_vec(&Vec::<Raw<AnySyncStateEvent>>::new()).unwrap(),
-        )?; // TODO
+        )?;
         self.roomuserid_leftcount.insert(
             &roomuser_id,
             &services().globals.next_count()?.to_be_bytes(),
diff --git a/src/database/key_value/rooms/user.rs b/src/database/key_value/rooms/user.rs
index 6d5977dd..f3564272 100644
--- a/src/database/key_value/rooms/user.rs
+++ b/src/database/key_value/rooms/user.rs
@@ -123,7 +123,8 @@ impl service::rooms::user::Data for KeyValueDatabase {
             .find(|(_, &b)| b == 0xff)
             .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))?
             .0
-            + 1; // +1 because the room id starts AFTER the separator
+            // +1 because the room id starts AFTER the separator
+            + 1;
 
         let room_id = key[roomid_index..].to_vec();
 
diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs
index 0122d936..d437803f 100644
--- a/src/database/key_value/users.rs
+++ b/src/database/key_value/users.rs
@@ -215,7 +215,8 @@ impl service::users::Data for KeyValueDatabase {
             &serde_json::to_vec(&Device {
                 device_id: device_id.into(),
                 display_name: initial_device_display_name,
-                last_seen_ip: None, // TODO
+                // TODO
+                last_seen_ip: None,
                 last_seen_ts: Some(MilliSecondsSinceUnixEpoch::now()),
             })
             .expect("Device::to_string never fails."),
@@ -365,7 +366,8 @@ impl service::users::Data for KeyValueDatabase {
         prefix.push(0xff);
         prefix.extend_from_slice(device_id.as_bytes());
         prefix.push(0xff);
-        prefix.push(b'"'); // Annoying quotation mark
+        // Annoying quotation mark
+        prefix.push(b'"');
         prefix.extend_from_slice(key_algorithm.as_ref().as_bytes());
         prefix.push(b':');
 
@@ -828,7 +830,8 @@ impl service::users::Data for KeyValueDatabase {
 
         for (key, _) in self
             .todeviceid_events
-            .iter_from(&last, true) // this includes last
+            // this includes last
+            .iter_from(&last, true)
             .take_while(move |(k, _)| k.starts_with(&prefix))
             .map(|(key, _)| {
                 Ok::<_, Error>((
diff --git a/src/service/globals.rs b/src/service/globals.rs
index 98b543c8..a72a5e06 100644
--- a/src/service/globals.rs
+++ b/src/service/globals.rs
@@ -43,16 +43,20 @@ use base64::{engine::general_purpose, Engine as _};
 
 type WellKnownMap = HashMap<OwnedServerName, (FedDest, String)>;
 type TlsNameMap = HashMap<String, (Vec<IpAddr>, u16)>;
-type RateLimitState = (Instant, u32); // Time of last failed try, number of failed tries
+// Time of last failed try, number of failed tries
+type RateLimitState = (Instant, u32);
 type SyncHandle = (
-    Option<String>,                                      // since
-    Receiver<Option<Result<sync_events::v3::Response>>>, // rx
+    // since
+    Option<String>,
+    // rx
+    Receiver<Option<Result<sync_events::v3::Response>>>,
 );
 
 pub(crate) struct Service {
     pub(crate) db: &'static dyn Data,
-    pub(crate) actual_destination_cache: Arc<RwLock<WellKnownMap>>, // actual_destination, host
+    // actual_destination, host
+    pub(crate) actual_destination_cache: Arc<RwLock<WellKnownMap>>,
     pub(crate) tls_name_override: Arc<RwLock<TlsNameMap>>,
     pub(crate) config: Config,
     keypair: Arc<ruma::signatures::Ed25519KeyPair>,
@@ -69,7 +73,9 @@ pub(crate) struct Service {
     pub(crate) sync_receivers: RwLock<HashMap<(OwnedUserId, OwnedDeviceId), SyncHandle>>,
     pub(crate) roomid_mutex_insert: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>,
     pub(crate) roomid_mutex_state: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>,
-    pub(crate) roomid_mutex_federation: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>, // this lock will be held longer
+
+    // this lock will be held longer
+    pub(crate) roomid_mutex_federation: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>,
     pub(crate) roomid_federationhandletime: RwLock<HashMap<OwnedRoomId, (OwnedEventId, Instant)>>,
     pub(crate) stateres_mutex: Arc<Mutex<()>>,
     pub(crate) rotate: RotationHandler,
diff --git a/src/service/media.rs b/src/service/media.rs
index 7b12ab1f..76184374 100644
--- a/src/service/media.rs
+++ b/src/service/media.rs
@@ -113,8 +113,9 @@ impl Service {
         width: u32,
         height: u32,
     ) -> Result<Option<FileMeta>> {
+        // 0, 0 because that's the original file
         let (width, height, crop) =
-            Self::thumbnail_properties(width, height).unwrap_or((0, 0, false)); // 0, 0 because that's the original file
+            Self::thumbnail_properties(width, height).unwrap_or((0, 0, false));
 
         if let Ok((content_disposition, content_type, key)) =
             self.db.search_file_metadata(mxc.clone(), width, height)
diff --git a/src/service/pusher.rs b/src/service/pusher.rs
index 6e0fc755..90a3b90d 100644
--- a/src/service/pusher.rs
+++ b/src/service/pusher.rs
@@ -93,10 +93,11 @@ impl Service {
             .expect("http::response::Builder is usable"),
         );
 
+        // TODO: handle timeout
         let body = response.bytes().await.unwrap_or_else(|e| {
             warn!("server error {}", e);
             Vec::new().into()
-        }); // TODO: handle timeout
+        });
 
         if status != 200 {
             info!(
@@ -201,7 +202,8 @@ impl Service {
 
         let ctx = PushConditionRoomCtx {
             room_id: room_id.to_owned(),
-            member_count: 10_u32.into(), // TODO: get member count efficiently
+            // TODO: get member count efficiently
+            member_count: 10_u32.into(),
             user_id: user.to_owned(),
             user_display_name: services()
                 .users
diff --git a/src/service/rooms/edus/typing.rs b/src/service/rooms/edus/typing.rs
index 44d3e6af..972db4a1 100644
--- a/src/service/rooms/edus/typing.rs
+++ b/src/service/rooms/edus/typing.rs
@@ -6,8 +6,10 @@ use tracing::trace;
 use crate::{services, utils, Result};
 
 pub(crate) struct Service {
-    pub(crate) typing: RwLock<BTreeMap<OwnedRoomId, BTreeMap<OwnedUserId, u64>>>, // u64 is unix timestamp of timeout
-    pub(crate) last_typing_update: RwLock<BTreeMap<OwnedRoomId, u64>>, // timestamp of the last change to typing users
+    // u64 is unix timestamp of timeout
+    pub(crate) typing: RwLock<BTreeMap<OwnedRoomId, BTreeMap<OwnedUserId, u64>>>,
+    // timestamp of the last change to typing users
+    pub(crate) last_typing_update: RwLock<BTreeMap<OwnedRoomId, u64>>,
     pub(crate) typing_update_sender: broadcast::Sender<OwnedRoomId>,
 }
 
diff --git a/src/service/rooms/event_handler.rs b/src/service/rooms/event_handler.rs
index c334f6d8..8ff88088 100644
--- a/src/service/rooms/event_handler.rs
+++ b/src/service/rooms/event_handler.rs
@@ -446,7 +446,8 @@ impl Service {
         if !state_res::event_auth::auth_check(
             &room_version,
             &incoming_pdu,
-            None::<PduEvent>, // TODO: third party invite
+            // TODO: third party invite
+            None::<PduEvent>,
             |k, s| auth_events.get(&(k.to_string().into(), s.to_owned())),
         )
         .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed"))?
@@ -748,7 +749,8 @@ impl Service {
             let check_result = state_res::event_auth::auth_check(
                 &room_version,
                 &incoming_pdu,
-                None::<PduEvent>, // TODO: third party invite
+                // TODO: third party invite
+                None::<PduEvent>,
                 |k, s| {
                     services()
                         .rooms
diff --git a/src/service/rooms/pdu_metadata.rs b/src/service/rooms/pdu_metadata.rs
index 61128e5e..636a3a58 100644
--- a/src/service/rooms/pdu_metadata.rs
+++ b/src/service/rooms/pdu_metadata.rs
@@ -64,7 +64,8 @@ impl Service {
         let events_after: Vec<_> = services()
             .rooms
             .pdu_metadata
-            .relations_until(sender_user, room_id, target, from)? // TODO: should be relations_after
+            // TODO: should be relations_after
+            .relations_until(sender_user, room_id, target, from)?
             .filter(|r| {
                 r.as_ref().map_or(true, |(_, pdu)| {
                     filter_event_type.as_ref().map_or(true, |t| &&pdu.kind == t)
@@ -90,14 +91,16 @@ impl Service {
                     .user_can_see_event(sender_user, room_id, &pdu.event_id)
                     .unwrap_or(false)
             })
-            .take_while(|&(k, _)| Some(k) != to) // Stop at `to`
+            // Stop at `to`
+            .take_while(|&(k, _)| Some(k) != to)
             .collect();
 
         next_token = events_after.last().map(|(count, _)| count).copied();
 
         let events_after: Vec<_> = events_after
             .into_iter()
-            .rev() // relations are always most recent first
+            // relations are always most recent first
+            .rev()
             .map(|(_, pdu)| pdu.to_message_like_event())
             .collect();
 
@@ -137,7 +140,8 @@ impl Service {
                     .user_can_see_event(sender_user, room_id, &pdu.event_id)
                     .unwrap_or(false)
             })
-            .take_while(|&(k, _)| Some(k) != to) // Stop at `to`
+            // Stop at `to`
+            .take_while(|&(k, _)| Some(k) != to)
             .collect();
 
         next_token = events_before.last().map(|(count, _)| count).copied();
 
@@ -167,7 +171,8 @@ impl Service {
         let target = match services().rooms.timeline.get_pdu_count(target)? {
             Some(PduCount::Normal(c)) => c,
             // TODO: Support backfilled relations
-            _ => 0, // This will result in an empty iterator
+            // This will result in an empty iterator
+            _ => 0,
         };
         self.db.relations_until(user_id, room_id, target, until)
     }
diff --git a/src/service/rooms/state.rs b/src/service/rooms/state.rs
index daa17a58..0d4f91a0 100644
--- a/src/service/rooms/state.rs
+++ b/src/service/rooms/state.rs
@@ -35,7 +35,8 @@ impl Service {
         shortstatehash: u64,
         statediffnew: Arc<HashSet<CompressedStateEvent>>,
         _statediffremoved: Arc<HashSet<CompressedStateEvent>>,
-        state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+        // Take mutex guard to make sure users get the room state mutex
+        state_lock: &MutexGuard<'_, ()>,
     ) -> Result<()> {
         for event_id in statediffnew.iter().filter_map(|new| {
             services()
@@ -169,7 +170,8 @@ impl Service {
                 shortstatehash,
                 statediffnew,
                 statediffremoved,
-                1_000_000, // high number because no state will be based on this one
+                // high number because no state will be based on this one
+                1_000_000,
                 states_parents,
             )?;
         }
@@ -315,7 +317,8 @@ impl Service {
         &self,
         room_id: &RoomId,
         shortstatehash: u64,
-        mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+        // Take mutex guard to make sure users get the room state mutex
+        mutex_lock: &MutexGuard<'_, ()>,
     ) -> Result<()> {
         self.db.set_room_state(room_id, shortstatehash, mutex_lock)
     }
@@ -358,7 +361,8 @@ impl Service {
         &self,
         room_id: &RoomId,
         event_ids: Vec<OwnedEventId>,
-        state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+        // Take mutex guard to make sure users get the room state mutex
+        state_lock: &MutexGuard<'_, ()>,
     ) -> Result<()> {
         self.db
             .set_forward_extremities(room_id, event_ids, state_lock)
diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs
index 1074a5dd..8e267760 100644
--- a/src/service/rooms/state/data.rs
+++ b/src/service/rooms/state/data.rs
@@ -12,7 +12,8 @@ pub(crate) trait Data: Send + Sync {
         &self,
         room_id: &RoomId,
         new_shortstatehash: u64,
-        _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+        // Take mutex guard to make sure users get the room state mutex
+        _mutex_lock: &MutexGuard<'_, ()>,
     ) -> Result<()>;
 
     /// Associates a state with an event.
@@ -26,6 +27,7 @@ pub(crate) trait Data: Send + Sync {
         &self,
         room_id: &RoomId,
         event_ids: Vec<OwnedEventId>,
-        _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+        // Take mutex guard to make sure users get the room state mutex
+        _mutex_lock: &MutexGuard<'_, ()>,
     ) -> Result<()>;
 }
diff --git a/src/service/rooms/state_cache.rs b/src/service/rooms/state_cache.rs
index 78aba47b..9b15cd88 100644
--- a/src/service/rooms/state_cache.rs
+++ b/src/service/rooms/state_cache.rs
@@ -151,8 +151,10 @@ impl Service {
             let is_ignored = services()
                 .account_data
                 .get(
-                    None,    // Ignored users are in global account data
-                    user_id, // Receiver
+                    // Ignored users are in global account data
+                    None,
+                    // Receiver
+                    user_id,
                     GlobalAccountDataEventType::IgnoredUserList
                         .to_string()
                         .into(),
diff --git a/src/service/rooms/state_compressor.rs b/src/service/rooms/state_compressor.rs
index 4f91d2e4..13f0912a 100644
--- a/src/service/rooms/state_compressor.rs
+++ b/src/service/rooms/state_compressor.rs
@@ -21,10 +21,14 @@ pub(crate) struct Service {
         LruCache<
             u64,
             Vec<(
-                u64,                                // shortstatehash
-                Arc<HashSet<CompressedStateEvent>>, // full state
-                Arc<HashSet<CompressedStateEvent>>, // added
-                Arc<HashSet<CompressedStateEvent>>, // removed
+                // shortstatehash
+                u64,
+                // full state
+                Arc<HashSet<CompressedStateEvent>>,
+                // added
+                Arc<HashSet<CompressedStateEvent>>,
+                // removed
+                Arc<HashSet<CompressedStateEvent>>,
             )>,
         >,
     >,
@@ -41,10 +45,14 @@ impl Service {
         shortstatehash: u64,
     ) -> Result<
         Vec<(
-            u64,                                // shortstatehash
-            Arc<HashSet<CompressedStateEvent>>, // full state
-            Arc<HashSet<CompressedStateEvent>>, // added
-            Arc<HashSet<CompressedStateEvent>>, // removed
+            // shortstatehash
+            u64,
+            // full state
+            Arc<HashSet<CompressedStateEvent>>,
+            // added
+            Arc<HashSet<CompressedStateEvent>>,
+            // removed
+            Arc<HashSet<CompressedStateEvent>>,
         )>,
     > {
         if let Some(r) = self
@@ -152,10 +160,14 @@ impl Service {
         statediffremoved: Arc<HashSet<CompressedStateEvent>>,
         diff_to_sibling: usize,
         mut parent_states: Vec<(
-            u64,                                // shortstatehash
-            Arc<HashSet<CompressedStateEvent>>, // full state
-            Arc<HashSet<CompressedStateEvent>>, // added
-            Arc<HashSet<CompressedStateEvent>>, // removed
+            // shortstatehash
+            u64,
+            // full state
+            Arc<HashSet<CompressedStateEvent>>,
+            // added
+            Arc<HashSet<CompressedStateEvent>>,
+            // removed
+            Arc<HashSet<CompressedStateEvent>>,
         )>,
     ) -> Result<()> {
         let diffsum = statediffnew.len() + statediffremoved.len();
@@ -318,7 +330,8 @@ impl Service {
                 new_shortstatehash,
                 statediffnew.clone(),
                 statediffremoved.clone(),
-                2, // every state change is 2 event changes on average
+                // every state change is 2 event changes on average
+                2,
                 states_parents,
             )?;
         };
diff --git a/src/service/rooms/timeline.rs b/src/service/rooms/timeline.rs
index eb7db36f..42b802bc 100644
--- a/src/service/rooms/timeline.rs
+++ b/src/service/rooms/timeline.rs
@@ -177,7 +177,8 @@ impl Service {
         pdu: &PduEvent,
         mut pdu_json: CanonicalJsonObject,
         leaves: Vec<OwnedEventId>,
-        state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+        // Take mutex guard to make sure users get the room state mutex
+        state_lock: &MutexGuard<'_, ()>,
     ) -> Result<Vec<u8>> {
         let shortroomid = services()
             .rooms
@@ -527,7 +528,8 @@ impl Service {
                         .threads
                         .add_to_thread(&thread.event_id, pdu)?;
                 }
-                _ => {} // TODO: Aggregate other types
+                // TODO: Aggregate other types
+                _ => {}
             }
         }
 
@@ -598,7 +600,8 @@ impl Service {
         pdu_builder: PduBuilder,
         sender: &UserId,
         room_id: &RoomId,
-        _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+        // Take mutex guard to make sure users get the room state mutex
+        _mutex_lock: &MutexGuard<'_, ()>,
     ) -> Result<(PduEvent, CanonicalJsonObject)> {
         let PduBuilder {
             event_type,
@@ -702,7 +705,8 @@ impl Service {
         let auth_check = state_res::auth_check(
             &room_version,
             &pdu,
-            None::<PduEvent>, // TODO: third_party_invite
+            // TODO: third_party_invite
+            None::<PduEvent>,
             |k, s| auth_events.get(&(k.clone(), s.to_owned())),
         )
         .map_err(|e| {
@@ -781,7 +785,8 @@ impl Service {
         pdu_builder: PduBuilder,
         sender: &UserId,
         room_id: &RoomId,
-        state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+        // Take mutex guard to make sure users get the room state mutex
+        state_lock: &MutexGuard<'_, ()>,
     ) -> Result<Arc<EventId>> {
         let (pdu, pdu_json) =
             self.create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock)?;
@@ -990,7 +995,8 @@ impl Service {
         new_room_leaves: Vec<OwnedEventId>,
         state_ids_compressed: Arc<HashSet<CompressedStateEvent>>,
         soft_fail: bool,
-        state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+        // Take mutex guard to make sure users get the room state mutex
+        state_lock: &MutexGuard<'_, ()>,
     ) -> Result<Option<Vec<u8>>> {
         // We append to state before appending the pdu, so we don't have a moment in time with the
         // pdu without it's state. This is okay because append_pdu can't fail.
diff --git a/src/service/sending.rs b/src/service/sending.rs
index 9cfb8cfe..ebfaf379 100644
--- a/src/service/sending.rs
+++ b/src/service/sending.rs
@@ -47,7 +47,8 @@ use tracing::{debug, error, warn};
 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
 pub(crate) enum OutgoingKind {
     Appservice(String),
-    Push(OwnedUserId, String), // user and pushkey
+    // user and pushkey
+    Push(OwnedUserId, String),
     Normal(OwnedServerName),
 }
 
@@ -81,8 +82,10 @@ impl OutgoingKind {
 
 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
 pub(crate) enum SendingEventType {
-    Pdu(Vec<u8>), // pduid
-    Edu(Vec<u8>), // pdu json
+    // pduid
+    Pdu(Vec<u8>),
+    // pdu json
+    Edu(Vec<u8>),
 }
 
 pub(crate) struct Service {
@@ -96,8 +99,10 @@ pub(crate) struct Service {
 
 enum TransactionStatus {
     Running,
-    Failed(u32, Instant), // number of times failed, time of last failure
-    Retrying(u32),        // number of times failed
+    // number of times failed, time of last failure
+    Failed(u32, Instant),
+    // number of times failed
+    Retrying(u32),
 }
 
 impl Service {
@@ -203,7 +208,8 @@ impl Service {
     fn select_events(
         &self,
         outgoing_kind: &OutgoingKind,
-        new_events: Vec<(SendingEventType, Vec<u8>)>, // Events we want to send: event and full key
+        // Events we want to send: event and full key
+        new_events: Vec<(SendingEventType, Vec<u8>)>,
        current_transaction_status: &mut HashMap<OutgoingKind, TransactionStatus>,
     ) -> Result<Option<Vec<SendingEventType>>> {
         let mut retry = false;
@@ -214,7 +220,8 @@ impl Service {
             entry
                 .and_modify(|e| match e {
                     TransactionStatus::Running | TransactionStatus::Retrying(_) => {
-                        allow = false; // already running
+                        // already running
+                        allow = false;
                     }
                     TransactionStatus::Failed(tries, time) => {
                         // Fail if a request has failed recently (exponential backoff)
@@ -444,7 +451,6 @@ impl Service {
 
     /// Cleanup event data
     /// Used for instance after we remove an appservice registration
-    ///
     #[tracing::instrument(skip(self))]
     pub(crate) fn cleanup_events(&self, appservice_id: String) -> Result<()> {
         self.db
@@ -543,9 +549,8 @@ impl Service {
                         })?,
                     );
                 }
-                SendingEventType::Edu(_) => {
-                    // Push gateways don't need EDUs (?)
-                }
+                // Push gateways don't need EDUs (?)
+                SendingEventType::Edu(_) => {}
             }
         }
 
diff --git a/src/service/uiaa.rs b/src/service/uiaa.rs
index 1efd1d85..1cb026d9 100644
--- a/src/service/uiaa.rs
+++ b/src/service/uiaa.rs
@@ -29,7 +29,8 @@ impl Service {
             self.db.set_uiaa_request(
                 user_id,
                 device_id,
-                uiaainfo.session.as_ref().expect("session should be set"), // TODO: better session error handling (why is it optional in ruma?)
+                // TODO: better session error handling (why is it optional in ruma?)
+                uiaainfo.session.as_ref().expect("session should be set"),
                 json_body,
             )?;
             self.db.update_uiaa_session(
diff --git a/src/service/users.rs b/src/service/users.rs
index 53882040..b69c98b0 100644
--- a/src/service/users.rs
+++ b/src/service/users.rs
@@ -28,7 +28,8 @@ use crate::{services, Error, Result};
 pub(crate) struct SlidingSyncCache {
     lists: BTreeMap<String, SyncRequestList>,
     subscriptions: BTreeMap<OwnedRoomId, sync_events::v4::RoomSubscription>,
-    known_rooms: BTreeMap<String, BTreeMap<OwnedRoomId, u64>>, // For every room, the roomsince number
+    // For every room, the roomsince number
+    known_rooms: BTreeMap<String, BTreeMap<OwnedRoomId, u64>>,
     extensions: ExtensionsConfig,
 }
 
diff --git a/src/utils.rs b/src/utils.rs
index a335e29c..194f0eef 100644
--- a/src/utils.rs
+++ b/src/utils.rs
@@ -27,7 +27,8 @@ pub(crate) fn increment(old: Option<&[u8]>) -> Vec<u8> {
             let number = u64::from_be_bytes(bytes);
             number + 1
         }
-        _ => 1, // Start at one. since 0 should return the first event in the db
+        // Start at one, since 0 should return the first event in the db
+        _ => 1,
     };
 
     number.to_be_bytes().to_vec()
@@ -97,10 +98,12 @@ where
     other_iterators.iter_mut().all(|it| {
         while let Some(element) = it.peek() {
             match check_order(element, target) {
-                Ordering::Greater => return false, // We went too far
-                Ordering::Equal => return true,    // Element is in both iters
+                // We went too far
+                Ordering::Greater => return false,
+                // Element is in both iters
+                Ordering::Equal => return true,
+                // Keep searching
                 Ordering::Less => {
-                    // Keep searching
                     it.next();
                 }
             }
diff --git a/src/utils/error.rs b/src/utils/error.rs
index ae8a7aff..c0655de5 100644
--- a/src/utils/error.rs
+++ b/src/utils/error.rs
@@ -63,8 +63,9 @@ pub(crate) enum Error {
     Uiaa(UiaaInfo),
     #[error("{0}: {1}")]
     BadRequest(ErrorKind, &'static str),
+    // This is only needed for when a room alias already exists
     #[error("{0}")]
-    Conflict(&'static str), // This is only needed for when a room alias already exists
+    Conflict(&'static str),
     #[error("{0}")]
     Extension(#[from] axum::extract::rejection::ExtensionRejection),
     #[error("{0}")]
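
Note on the `// TODO: handle timeout` comments repeated in appservice_server.rs,
server_server.rs, and pusher.rs: one possible follow-up is to bound the body read
with tokio's timeout. A minimal sketch, assuming a tokio runtime and an arbitrary
30-second budget (both are assumptions, not part of this patch):

    use std::time::Duration;

    use tokio::time::timeout;
    use tracing::warn;

    // Hypothetical helper, not in this patch: read a response body, falling
    // back to an empty body on error *or* timeout instead of waiting forever.
    async fn body_with_timeout(response: reqwest::Response) -> bytes::Bytes {
        match timeout(Duration::from_secs(30), response.bytes()).await {
            Ok(Ok(body)) => body,
            Ok(Err(e)) => {
                warn!("server error {}", e);
                Vec::new().into()
            }
            Err(_) => {
                warn!("timed out reading response body");
                Vec::new().into()
            }
        }
    }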