SSS: fix timeline pagination

Author: Lambda
Date: 2025-05-16 18:45:12 +00:00
parent 891eb410cc
commit 166a269034
2 changed files with 67 additions and 9 deletions


@@ -375,8 +375,9 @@ pub(crate) async fn sync_events_v5_route(
         services().users.forget_sync_request_connection(&connection_key);
     }
 
-    let known_rooms =
-        services().users.get_rooms_in_connection(connection_key.clone());
+    let known_rooms = services()
+        .users
+        .get_rooms_in_connection(connection_key.clone(), globalsince);
 
     let all_joined_rooms = joined_rooms_data(&sender_user);
@@ -438,6 +439,7 @@ pub(crate) async fn sync_events_v5_route(
         connection_key.clone(),
         todo_rooms.keys().cloned().collect(),
         globalsince,
+        next_batch,
     );
 
     let mut rooms = BTreeMap::new();
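
Taken together, the route-side changes thread the request's `pos` value (`globalsince`) into the cache read and the response's `next_batch` into the cache write. A minimal sketch of that call order, with hypothetical stubs standing in for `services().users` (only the parameter flow is taken from the diff):

use std::collections::{BTreeMap, BTreeSet};

// Stub: returns the cached `since` values that are valid for this `pos`.
fn get_rooms_in_connection(_key: u64, _pos: u64) -> BTreeMap<String, u64> {
    BTreeMap::new()
}

// Stub: records the rooms sent in this response under `next_batch`.
fn update_sync_known_rooms(
    _key: u64,
    _rooms: BTreeSet<String>,
    _pos: u64,
    _next_batch: u64,
) {
}

fn main() {
    let connection_key = 1;
    let globalsince = 10; // the `pos` the client sent with this request
    let known_rooms = get_rooms_in_connection(connection_key, globalsince);

    // ... build the response; rooms included in it become `todo_rooms` ...
    let todo_rooms = BTreeSet::from(["!a:example.org".to_owned()]);
    let next_batch = globalsince + 1;

    // Cache what we sent, keyed by the `pos` we expect the client to echo.
    update_sync_known_rooms(connection_key, todo_rooms, globalsince, next_batch);
    let _ = known_rooms;
}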


@@ -19,9 +19,24 @@ mod data;
 pub(crate) use data::Data;
 
+#[derive(Debug)]
+struct KnownRooms {
+    /// The `pos` value of the request that these `room_since` values apply to
+    pos: u64,
+    /// `since` values for rooms that have been sent previously
+    room_since: BTreeMap<OwnedRoomId, u64>,
+}
+
 #[derive(Debug, Default)]
 pub(crate) struct SlidingSyncCache {
-    known_rooms: BTreeMap<OwnedRoomId, u64>,
+    /// `since` values for rooms in the current/previous request. Needed in
+    /// case the response doesn't arrive and the client requests the same
+    /// `pos` value again.
+    current_known_rooms: Option<KnownRooms>,
+    /// Overlay on top of `current_known_rooms` of `since` values for rooms in
+    /// the expected next request (where the `pos` value should be the
+    /// `pos` value from our response).
+    next_known_rooms: Option<KnownRooms>,
 }
 
 #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
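
The two `Option<KnownRooms>` slots form a small state machine keyed by `pos`. A minimal standalone model of the state after one served request (simplified types and hypothetical names, not the actual service code):

use std::collections::BTreeMap;

#[derive(Debug, Default)]
struct Cache {
    /// Valid for the `pos` currently being served; survives retries.
    current: Option<(u64, BTreeMap<&'static str, u64>)>,
    /// Overlay that becomes valid once the client echoes our response's `pos`.
    next: Option<(u64, BTreeMap<&'static str, u64>)>,
}

fn main() {
    let mut cache = Cache::default();
    // After serving request pos=0 and responding with pos=5 for room "!a":
    cache.current = Some((0, BTreeMap::new()));
    cache.next = Some((5, BTreeMap::from([("!a", 5)])));
    // A retry of pos=0 means the client never saw that response, so the
    // overlay must be discarded before serving the retry.
    cache.next = None;
    println!("{cache:?}");
}
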
@@ -66,28 +81,69 @@ impl Service {
         self.connections.lock().unwrap().remove(connection_key);
     }
 
     #[tracing::instrument(skip(self))]
     pub(crate) fn get_rooms_in_connection(
         &self,
         connection_key: ConnectionKey,
+        pos: u64,
     ) -> BTreeMap<OwnedRoomId, u64> {
         let cached = self.get_cache_entry(connection_key);
-        let cached = cached.lock().unwrap();
+        let mut cached = cached.lock().unwrap();
+        let cached = &mut *cached;
 
-        cached.known_rooms.clone()
+        let current_known_rooms =
+            cached.current_known_rooms.get_or_insert(KnownRooms {
+                pos,
+                room_since: BTreeMap::new(),
+            });
+
+        if current_known_rooms.pos == pos {
+            // Another request for a previous `pos` value, invalidate the next
+            // result
+            cached.next_known_rooms = None;
+        } else if let Some(next_known_rooms) =
+            cached.next_known_rooms.take().filter(|x| x.pos == pos)
+        {
+            // Merge overlay into current_known_rooms
+            current_known_rooms.pos = next_known_rooms.pos;
+            current_known_rooms.room_since.extend(next_known_rooms.room_since);
+        } else {
+            // Not a repeated request, and we don't have calculated values for a
+            // next request, start over
+            *current_known_rooms = KnownRooms {
+                pos,
+                room_since: BTreeMap::new(),
+            };
+        }
+
+        current_known_rooms.room_since.clone()
     }
 
     #[tracing::instrument(skip(self))]
     pub(crate) fn update_sync_known_rooms(
         &self,
         connection_key: ConnectionKey,
         new_cached_rooms: BTreeSet<OwnedRoomId>,
-        globalsince: u64,
+        pos: u64,
+        next_batch: u64,
     ) {
         let cached = self.get_cache_entry(connection_key);
         let mut cached = cached.lock().unwrap();
 
-        for roomid in new_cached_rooms {
-            cached.known_rooms.insert(roomid, globalsince);
-        }
+        assert_eq!(
+            cached.current_known_rooms.as_ref().map(|x| x.pos),
+            Some(pos),
+            "current known rooms should match current request's pos"
+        );
+
+        // Add an overlay to the current request's known rooms
+        cached.next_known_rooms = Some(KnownRooms {
+            pos: next_batch,
+            room_since: new_cached_rooms
+                .into_iter()
+                .map(|room_id| (room_id, next_batch))
+                .collect(),
+        });
     }
 
     /// Check if account is deactivated
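
For reference, a self-contained sketch of the retry/advance behaviour these two methods implement, with simplified types and no locking; the names are hypothetical and only the control flow mirrors the diff:

use std::collections::BTreeMap;

#[derive(Debug)]
struct KnownRooms {
    pos: u64,
    room_since: BTreeMap<&'static str, u64>,
}

#[derive(Debug, Default)]
struct Cache {
    current: Option<KnownRooms>,
    next: Option<KnownRooms>,
}

fn get_rooms(cache: &mut Cache, pos: u64) -> BTreeMap<&'static str, u64> {
    let current = cache.current.get_or_insert(KnownRooms {
        pos,
        room_since: BTreeMap::new(),
    });
    if current.pos == pos {
        // Retry of the request we are already serving: the client never saw
        // our response, so the pending overlay is invalid.
        cache.next = None;
    } else if let Some(next) = cache.next.take().filter(|n| n.pos == pos) {
        // The client echoed our `next_batch`: promote the overlay.
        current.pos = next.pos;
        current.room_since.extend(next.room_since);
    } else {
        // Unknown `pos`: start over.
        *current = KnownRooms { pos, room_since: BTreeMap::new() };
    }
    current.room_since.clone()
}

fn update(cache: &mut Cache, rooms: &[&'static str], pos: u64, next_batch: u64) {
    assert_eq!(cache.current.as_ref().map(|c| c.pos), Some(pos));
    cache.next = Some(KnownRooms {
        pos: next_batch,
        room_since: rooms.iter().map(|&r| (r, next_batch)).collect(),
    });
}

fn main() {
    let mut cache = Cache::default();

    // Initial request: pos=0, nothing known yet.
    assert!(get_rooms(&mut cache, 0).is_empty());
    update(&mut cache, &["!a"], 0, 5);

    // The client retries pos=0 (it never received our response): the overlay
    // is dropped and "!a" is treated as unsent again.
    assert!(get_rooms(&mut cache, 0).is_empty());
    update(&mut cache, &["!a"], 0, 7);

    // The client advances to pos=7: the overlay applies and "!a" has since=7.
    assert_eq!(get_rooms(&mut cache, 7).get("!a"), Some(&7));
}

The key property is that repeating a `pos` always reproduces the state the client last acknowledged, so a lost response can no longer desynchronize the cached room list, which appears to be the failure mode behind this commit's subject.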