From 166a2690349f0ba6deffaf25c2cf42a4d4568e0c Mon Sep 17 00:00:00 2001
From: Lambda
Date: Fri, 16 May 2025 18:45:12 +0000
Subject: [PATCH] SSS: fix timeline pagination

---
 src/api/client_server/sync/msc4186.rs |  6 ++-
 src/service/users.rs                  | 70 ++++++++++++++++++++++++---
 2 files changed, 67 insertions(+), 9 deletions(-)

diff --git a/src/api/client_server/sync/msc4186.rs b/src/api/client_server/sync/msc4186.rs
index a3726faa..5b210702 100644
--- a/src/api/client_server/sync/msc4186.rs
+++ b/src/api/client_server/sync/msc4186.rs
@@ -375,8 +375,9 @@ pub(crate) async fn sync_events_v5_route(
         services().users.forget_sync_request_connection(&connection_key);
     }
 
-    let known_rooms =
-        services().users.get_rooms_in_connection(connection_key.clone());
+    let known_rooms = services()
+        .users
+        .get_rooms_in_connection(connection_key.clone(), globalsince);
 
     let all_joined_rooms = joined_rooms_data(&sender_user);
 
@@ -438,6 +439,7 @@ pub(crate) async fn sync_events_v5_route(
         connection_key.clone(),
         todo_rooms.keys().cloned().collect(),
         globalsince,
+        next_batch,
     );
 
     let mut rooms = BTreeMap::new();
diff --git a/src/service/users.rs b/src/service/users.rs
index 928843ae..192b0597 100644
--- a/src/service/users.rs
+++ b/src/service/users.rs
@@ -19,9 +19,24 @@ mod data;
 
 pub(crate) use data::Data;
 
+#[derive(Debug)]
+struct KnownRooms {
+    /// The `pos` value of the request that these `room_since` values apply to
+    pos: u64,
+    /// `since` values for rooms that have been sent previously
+    room_since: BTreeMap<OwnedRoomId, u64>,
+}
+
 #[derive(Debug, Default)]
 pub(crate) struct SlidingSyncCache {
-    known_rooms: BTreeMap<OwnedRoomId, u64>,
+    /// `since` values for rooms in the current/previous request. Needed in
+    /// case the response doesn't arrive and the client requests the same
+    /// `pos` value again.
+    current_known_rooms: Option<KnownRooms>,
+    /// Overlay on top of `current_known_rooms` of `since` values for rooms in
+    /// the expected next request (where the `pos` value should be the
+    /// `pos` value from our response).
+    next_known_rooms: Option<KnownRooms>,
 }
 
 #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
@@ -66,28 +81,69 @@ impl Service {
         self.connections.lock().unwrap().remove(connection_key);
     }
 
+    #[tracing::instrument(skip(self))]
     pub(crate) fn get_rooms_in_connection(
         &self,
         connection_key: ConnectionKey,
+        pos: u64,
     ) -> BTreeMap<OwnedRoomId, u64> {
         let cached = self.get_cache_entry(connection_key);
-        let cached = cached.lock().unwrap();
+        let mut cached = cached.lock().unwrap();
+        let cached = &mut *cached;
 
-        cached.known_rooms.clone()
+        let current_known_rooms =
+            cached.current_known_rooms.get_or_insert(KnownRooms {
+                pos,
+                room_since: BTreeMap::new(),
+            });
+
+        if current_known_rooms.pos == pos {
+            // Another request for a previous `pos` value, invalidate the next
+            // result
+            cached.next_known_rooms = None;
+        } else if let Some(next_known_rooms) =
+            cached.next_known_rooms.take().filter(|x| x.pos == pos)
+        {
+            // Merge overlay into current_known_rooms
+            current_known_rooms.pos = next_known_rooms.pos;
+            current_known_rooms.room_since.extend(next_known_rooms.room_since);
+        } else {
+            // Not a repeated request, and we don't have calculated values for a
+            // next request, start over
+            *current_known_rooms = KnownRooms {
+                pos,
+                room_since: BTreeMap::new(),
+            };
+        }
+
+        current_known_rooms.room_since.clone()
     }
 
+    #[tracing::instrument(skip(self))]
     pub(crate) fn update_sync_known_rooms(
         &self,
         connection_key: ConnectionKey,
         new_cached_rooms: BTreeSet<OwnedRoomId>,
-        globalsince: u64,
+        pos: u64,
+        next_batch: u64,
     ) {
         let cached = self.get_cache_entry(connection_key);
         let mut cached = cached.lock().unwrap();
 
-        for roomid in new_cached_rooms {
-            cached.known_rooms.insert(roomid, globalsince);
-        }
+        assert_eq!(
+            cached.current_known_rooms.as_ref().map(|x| x.pos),
+            Some(pos),
+            "current known rooms should match current request's pos"
+        );
+
+        // Add an overlay to the current request's known rooms
+        cached.next_known_rooms = Some(KnownRooms {
+            pos: next_batch,
+            room_since: new_cached_rooms
+                .into_iter()
+                .map(|room_id| (room_id, next_batch))
+                .collect(),
+        });
    }
 
     /// Check if account is deactivated