From 33da7dcd9680c0e84da3fdc13557ba57b9ab018c Mon Sep 17 00:00:00 2001 From: Lambda Date: Fri, 4 Apr 2025 17:47:02 +0000 Subject: [PATCH 01/33] Check account data type match before storing to DB --- src/database/key_value/account_data.rs | 51 ++++++++++++++++++-------- 1 file changed, 36 insertions(+), 15 deletions(-) diff --git a/src/database/key_value/account_data.rs b/src/database/key_value/account_data.rs index 49231a2b..101986f8 100644 --- a/src/database/key_value/account_data.rs +++ b/src/database/key_value/account_data.rs @@ -3,6 +3,7 @@ use std::collections::HashMap; use ruma::{api::client::error::ErrorKind, RoomId, UserId}; use serde::Deserialize; use serde_json::value::RawValue; +use tracing::error; use crate::{ database::KeyValueDatabase, service, services, utils, Error, Result, @@ -16,16 +17,6 @@ impl service::account_data::Data for KeyValueDatabase { event_type: &str, data: &RawValue, ) -> Result<()> { - // Allowed because we just use this type to validate the schema, and - // don't read the fields. - #[allow(dead_code)] - #[derive(Deserialize)] - struct ExtractEventFields<'a> { - #[serde(rename = "type")] - event_type: &'a str, - content: &'a RawValue, - } - let mut prefix = room_id .map(ToString::to_string) .unwrap_or_default() @@ -44,11 +35,41 @@ impl service::account_data::Data for KeyValueDatabase { let mut key = prefix; key.extend_from_slice(event_type.as_bytes()); - if serde_json::from_str::>(data.get()).is_err() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Account data doesn't have all required fields.", - )); + { + #[derive(Deserialize)] + struct ExtractEventFields<'a> { + #[serde(rename = "type")] + event_type: &'a str, + // Allowed because we just use this type to validate the schema + // and event type, and don't extract the content. + #[allow(dead_code)] + content: &'a RawValue, + } + + let Ok(ExtractEventFields { + event_type: serialised_event_type, + .. + }) = serde_json::from_str(data.get()) + else { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Account data doesn't have all required fields.", + )); + }; + + if serialised_event_type != event_type { + error!( + %user_id, + ?room_id, + event_type, + serialised_event_type, + "Mismatch between discrete and serialised account data event type" + ); + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Account data event type mismatch.", + )); + } } self.roomuserdataid_accountdata From bbd04390013fa0dc5d6f5e1f3510e6a0aca37180 Mon Sep 17 00:00:00 2001 From: Lambda Date: Wed, 16 Apr 2025 18:35:34 +0000 Subject: [PATCH 02/33] Add documentation for Short*Id types --- src/service/rooms/short.rs | 59 ++++++++++++++++++++++++++++---------- 1 file changed, 44 insertions(+), 15 deletions(-) diff --git a/src/service/rooms/short.rs b/src/service/rooms/short.rs index ce4d207c..35cef167 100644 --- a/src/service/rooms/short.rs +++ b/src/service/rooms/short.rs @@ -9,27 +9,56 @@ use crate::{ }; macro_rules! 
short_id_type { - ($name:ident) => { - #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] - #[repr(transparent)] - pub(crate) struct $name(u64); + ($($(#[$doc:meta])* struct $name:ident(u64);)*) => { + $( + #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] + #[repr(transparent)] + $(#[$doc])* + pub(crate) struct $name(u64); - impl $name { - pub(crate) fn new(id: u64) -> Self { - Self(id) - } + impl $name { + pub(crate) fn new(id: u64) -> Self { + Self(id) + } - pub(crate) fn get(&self) -> u64 { - self.0 + pub(crate) fn get(&self) -> u64 { + self.0 + } } - } + )* }; } -short_id_type!(ShortRoomId); -short_id_type!(ShortEventId); -short_id_type!(ShortStateHash); -short_id_type!(ShortStateKey); +short_id_type!( + /// Interned [RoomId]. + /// + /// Created using [`get_shortroomid()`](Service::get_shortroomid) or + /// [`get_or_create_shortroomid()`](Service::get_or_create_shortroomid). + struct ShortRoomId(u64); + /// Interned [EventId]. + /// + /// Created using + /// [`get_or_create_shorteventid()`](Service::get_or_create_shorteventid), + /// resolved using + /// [`get_eventid_from_short()`](Service::get_eventid_from_short). + struct ShortEventId(u64); + /// Interned hash of concatenated state events. + /// + /// Equal state sets do not necessarily correspond to equal short state + /// hashes, because the calculated hash is dependent on `HashSet` + /// iteration order. + /// + /// Created using + /// [`get_or_create_shortstatehash()`](Service::get_or_create_shortstatehash). + struct ShortStateHash(u64); + /// Interned `(event type, state key)` tuple. + /// + /// Created using [`get_shortstatekey()`](Service::get_shortstatekey) or + /// [`get_or_create_shortstatekey()`](Service::get_or_create_shortstatekey), + /// resolved using + /// [`get_statekey_from_short()`](Service::get_statekey_from_short). + struct ShortStateKey(u64); +); mod data; From dd705a2bb205b84a1f21c611392a1391f4751e78 Mon Sep 17 00:00:00 2001 From: Lambda Date: Sun, 10 Aug 2025 10:49:50 +0000 Subject: [PATCH 03/33] Revert "Remove support for MSC3575 (sliding sync)" This reverts commit d87848b9a6073d8497a5f860876bc8dec858dda7. --- Cargo.toml | 1 + book/changelog.md | 2 - src/api/client_server/sync.rs | 1 + src/api/client_server/sync/msc3575.rs | 671 ++++++++++++++++++++++++++ src/api/well_known.rs | 9 + src/cli/serve.rs | 1 + src/config.rs | 3 + src/service/rooms/state_accessor.rs | 40 +- src/service/users.rs | 233 ++++++++- 9 files changed, 954 insertions(+), 7 deletions(-) create mode 100644 src/api/client_server/sync/msc3575.rs diff --git a/Cargo.toml b/Cargo.toml index bdea44fb..6cfd3f65 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -165,6 +165,7 @@ features = [ "state-res", "unstable-msc2448", "ring-compat", + "unstable-msc3575", ] [target.'cfg(unix)'.dependencies] diff --git a/book/changelog.md b/book/changelog.md index 40a8abea..3729bfbb 100644 --- a/book/changelog.md +++ b/book/changelog.md @@ -87,8 +87,6 @@ This will be the first release of Grapevine since it was forked from Conduit * Instead, it is now possible to configure each cache capacity individually. 10. Remove jemalloc support. ([!93](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/193)) -11. Removed support for MSC3575 (sliding sync), which has been closed. 
- ([!198](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/198)) ### Changed diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 4ad37425..2a17aa17 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -5,6 +5,7 @@ use crate::{ service::rooms::timeline::PduCount, services, Error, PduEvent, Result, }; +pub(crate) mod msc3575; pub(crate) mod v3; fn load_timeline( diff --git a/src/api/client_server/sync/msc3575.rs b/src/api/client_server/sync/msc3575.rs new file mode 100644 index 00000000..31333eaf --- /dev/null +++ b/src/api/client_server/sync/msc3575.rs @@ -0,0 +1,671 @@ +//! [MSC3575], aka Sliding Sync, aka Sync v3 (even though the endpoint is called +//! /v4) support +//! +//! [MSC3575]: https://github.com/matrix-org/matrix-spec-proposals/pull/3575 + +use std::{ + collections::{BTreeMap, BTreeSet, HashSet}, + time::Duration, +}; + +use ruma::{ + api::client::{ + sync::sync_events::{ + self, v4::SlidingOp, DeviceLists, UnreadNotificationsCount, + }, + uiaa::UiaaResponse, + }, + events::{ + room::member::{MembershipState, RoomMemberEventContent}, + StateEventType, TimelineEventType, + }, + uint, JsOption, UInt, UserId, +}; +use tracing::{debug, error}; + +use super::{load_timeline, share_encrypted_room}; +use crate::{ + service::{account_data, rooms::timeline::PduCount}, + services, Ar, Error, Ra, Result, +}; + +#[allow(clippy::too_many_lines)] +pub(crate) async fn sync_events_v4_route( + body: Ar, +) -> Result, Ra> { + let sender_user = body.sender_user.expect("user is authenticated"); + let sender_device = body.sender_device.expect("user is authenticated"); + let mut body = body.body; + // Setup watchers, so if there's no response, we can wait for them + let watcher = services().globals.watch(&sender_user, &sender_device); + + let next_batch = services().globals.next_count()?; + + let globalsince = + body.pos.as_ref().and_then(|string| string.parse().ok()).unwrap_or(0); + + if globalsince == 0 { + if let Some(conn_id) = &body.conn_id { + services().users.forget_sync_request_connection( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + ); + } + } + + // Get sticky parameters from cache + let known_rooms = services().users.update_sync_request_with_cache( + sender_user.clone(), + sender_device.clone(), + &mut body, + ); + + let all_joined_rooms = services() + .rooms + .state_cache + .rooms_joined(&sender_user) + .filter_map(Result::ok) + .collect::>(); + + if body.extensions.to_device.enabled.unwrap_or(false) { + services().users.remove_to_device_events( + &sender_user, + &sender_device, + globalsince, + )?; + } + + // Users that have left any encrypted rooms the sender was in + let mut left_encrypted_users = HashSet::new(); + let mut device_list_changes = HashSet::new(); + let mut device_list_left = HashSet::new(); + + if body.extensions.e2ee.enabled.unwrap_or(false) { + // Look for device list updates of this account + device_list_changes.extend( + services() + .users + .keys_changed(sender_user.as_ref(), globalsince, None) + .filter_map(Result::ok), + ); + + for room_id in &all_joined_rooms { + let Some(current_shortstatehash) = + services().rooms.state.get_room_shortstatehash(room_id)? 
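+            // A joined room should always have state; a missing
+            // shortstatehash suggests an inconsistent database, so log it
+            // and skip the room instead of failing the whole sync.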
+ else { + error!(%room_id, "Room has no state"); + continue; + }; + + let since_shortstatehash = services() + .rooms + .user + .get_token_shortstatehash(room_id, globalsince)?; + + let since_sender_member: Option = + since_shortstatehash + .and_then(|shortstatehash| { + services() + .rooms + .state_accessor + .state_get( + shortstatehash, + &StateEventType::RoomMember, + sender_user.as_str(), + ) + .transpose() + }) + .transpose()? + .and_then(|pdu| { + serde_json::from_str(pdu.content.get()) + .map_err(|_| { + Error::bad_database("Invalid PDU in database.") + }) + .ok() + }); + + let encrypted_room = services() + .rooms + .state_accessor + .state_get( + current_shortstatehash, + &StateEventType::RoomEncryption, + "", + )? + .is_some(); + + if let Some(since_shortstatehash) = since_shortstatehash { + // Skip if there are only timeline changes + if since_shortstatehash == current_shortstatehash { + continue; + } + + let since_encryption = + services().rooms.state_accessor.state_get( + since_shortstatehash, + &StateEventType::RoomEncryption, + "", + )?; + + let joined_since_last_sync = + since_sender_member.is_none_or(|member| { + member.membership != MembershipState::Join + }); + + let new_encrypted_room = + encrypted_room && since_encryption.is_none(); + if encrypted_room { + let current_state_ids = services() + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .await?; + let since_state_ids = services() + .rooms + .state_accessor + .state_full_ids(since_shortstatehash) + .await?; + + for (key, event_id) in current_state_ids { + if since_state_ids.get(&key) != Some(&event_id) { + let Some(pdu) = + services().rooms.timeline.get_pdu(&event_id)? + else { + error!(%event_id, "Event in state not found"); + continue; + }; + if pdu.kind == TimelineEventType::RoomMember { + if let Some(state_key) = &pdu.state_key { + let user_id = + UserId::parse(state_key.clone()) + .map_err(|_| { + Error::bad_database( + "Invalid UserId in member \ + PDU.", + ) + })?; + + if user_id == sender_user { + continue; + } + + let new_membership = + serde_json::from_str::< + RoomMemberEventContent, + >( + pdu.content.get() + ) + .map_err(|_| { + Error::bad_database( + "Invalid PDU in database.", + ) + })? + .membership; + + match new_membership { + MembershipState::Join => { + // A new user joined an encrypted + // room + if !share_encrypted_room( + &sender_user, + &user_id, + room_id, + )? 
{ + device_list_changes + .insert(user_id); + } + } + MembershipState::Leave => { + // Write down users that have left + // encrypted rooms we are in + left_encrypted_users + .insert(user_id); + } + _ => {} + } + } + } + } + } + if joined_since_last_sync || new_encrypted_room { + // If the user is in a new encrypted room, give them all + // joined users + device_list_changes.extend( + services() + .rooms + .state_cache + .room_members(room_id) + .flatten() + .filter(|user_id| { + // Don't send key updates from the sender to + // the sender + &sender_user != user_id + }) + .filter(|user_id| { + // Only send keys if the sender doesn't + // share an encrypted room with the target + // already + !share_encrypted_room( + &sender_user, + user_id, + room_id, + ) + .unwrap_or(false) + }), + ); + } + } + } + // Look for device list updates in this room + device_list_changes.extend( + services() + .users + .keys_changed(room_id.as_ref(), globalsince, None) + .filter_map(Result::ok), + ); + } + for user_id in left_encrypted_users { + let dont_share_encrypted_room = services() + .rooms + .user + .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])? + .filter_map(Result::ok) + .filter_map(|other_room_id| { + Some( + services() + .rooms + .state_accessor + .room_state_get( + &other_room_id, + &StateEventType::RoomEncryption, + "", + ) + .ok()? + .is_some(), + ) + }) + .all(|encrypted| !encrypted); + // If the user doesn't share an encrypted room with the target + // anymore, we need to tell them + if dont_share_encrypted_room { + device_list_left.insert(user_id); + } + } + } + + let mut lists = BTreeMap::new(); + // and required state + let mut todo_rooms = BTreeMap::new(); + + for (list_id, list) in body.lists { + if list.filters.and_then(|f| f.is_invite).unwrap_or(false) { + continue; + } + + let mut new_known_rooms = BTreeSet::new(); + + lists.insert( + list_id.clone(), + sync_events::v4::SyncList { + ops: list + .ranges + .into_iter() + .map(|mut r| { + r.0 = r.0.clamp( + uint!(0), + UInt::try_from(all_joined_rooms.len() - 1) + .unwrap_or(UInt::MAX), + ); + r.1 = r.1.clamp( + r.0, + UInt::try_from(all_joined_rooms.len() - 1) + .unwrap_or(UInt::MAX), + ); + let room_ids = all_joined_rooms[r + .0 + .try_into() + .unwrap_or(usize::MAX) + ..=r.1.try_into().unwrap_or(usize::MAX)] + .to_vec(); + new_known_rooms.extend(room_ids.iter().cloned()); + for room_id in &room_ids { + let todo_room = todo_rooms + .entry(room_id.clone()) + .or_insert((BTreeSet::new(), 0, u64::MAX)); + let limit = list + .room_details + .timeline_limit + .map_or(10, u64::from) + .min(100); + todo_room.0.extend( + list.room_details + .required_state + .iter() + .cloned(), + ); + todo_room.1 = todo_room.1.max(limit); + // 0 means unknown because it got out of date + todo_room.2 = todo_room.2.min( + known_rooms + .get(&list_id) + .and_then(|k| k.get(room_id)) + .copied() + .unwrap_or(0), + ); + } + sync_events::v4::SyncOp { + op: SlidingOp::Sync, + range: Some(r), + index: None, + room_ids, + room_id: None, + } + }) + .collect(), + count: UInt::try_from(all_joined_rooms.len()) + .unwrap_or(UInt::MAX), + }, + ); + + if let Some(conn_id) = &body.conn_id { + services().users.update_sync_known_rooms( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + list_id, + new_known_rooms, + globalsince, + ); + } + } + + let mut known_subscription_rooms = BTreeSet::new(); + for (room_id, room) in &body.room_subscriptions { + if !services().rooms.metadata.exists(room_id)? 
{ + continue; + } + let todo_room = todo_rooms.entry(room_id.clone()).or_insert(( + BTreeSet::new(), + 0, + u64::MAX, + )); + let limit = room.timeline_limit.map_or(10, u64::from).min(100); + todo_room.0.extend(room.required_state.iter().cloned()); + todo_room.1 = todo_room.1.max(limit); + // 0 means unknown because it got out of date + todo_room.2 = todo_room.2.min( + known_rooms + .get("subscriptions") + .and_then(|k| k.get(room_id)) + .copied() + .unwrap_or(0), + ); + known_subscription_rooms.insert(room_id.clone()); + } + + for r in body.unsubscribe_rooms { + known_subscription_rooms.remove(&r); + body.room_subscriptions.remove(&r); + } + + if let Some(conn_id) = &body.conn_id { + services().users.update_sync_known_rooms( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + "subscriptions".to_owned(), + known_subscription_rooms, + globalsince, + ); + } + + if let Some(conn_id) = &body.conn_id { + services().users.update_sync_subscriptions( + sender_user.clone(), + sender_device.clone(), + conn_id.clone(), + body.room_subscriptions, + ); + } + + let mut rooms = BTreeMap::new(); + for (room_id, (required_state_request, timeline_limit, roomsince)) in + &todo_rooms + { + let roomsincecount = PduCount::Normal(*roomsince); + + let (timeline_pdus, limited) = load_timeline( + &sender_user, + room_id, + roomsincecount, + *timeline_limit, + )?; + + if roomsince != &0 && timeline_pdus.is_empty() { + continue; + } + + let prev_batch = timeline_pdus + .first() + .map_or(Ok::<_, Error>(None), |(pdu_count, _)| { + Ok(Some(match pdu_count { + PduCount::Backfilled(_) => { + error!("Timeline in backfill state?!"); + "0".to_owned() + } + PduCount::Normal(c) => c.to_string(), + })) + })? + .or_else(|| (roomsince != &0).then(|| roomsince.to_string())); + + let room_events: Vec<_> = timeline_pdus + .iter() + .map(|(_, pdu)| pdu.to_sync_room_event()) + .collect(); + + let required_state = required_state_request + .iter() + .filter_map(|state| { + services() + .rooms + .state_accessor + .room_state_get(room_id, &state.0, &state.1) + .ok() + .flatten() + .map(|state| state.to_sync_state_event()) + }) + .collect(); + + // Heroes + let heroes = services() + .rooms + .state_cache + .room_members(room_id) + .filter_map(Result::ok) + .filter(|member| member != &sender_user) + .filter_map(|member| { + services() + .rooms + .state_accessor + .get_member(room_id, &member) + .ok() + .flatten() + .map(|memberevent| { + ( + memberevent + .displayname + .unwrap_or_else(|| member.to_string()), + memberevent.avatar_url, + ) + }) + }) + .take(5) + .collect::>(); + let name = match &*heroes { + [] => None, + [only] => Some(only.0.clone()), + [firsts @ .., last] => Some({ + let firsts = firsts + .iter() + .map(|h| h.0.clone()) + .collect::>() + .join(", "); + + format!("{firsts} and {}", last.0) + }), + }; + + let avatar = if let [only] = &*heroes { + only.1.clone() + } else { + None + }; + + rooms.insert( + room_id.clone(), + sync_events::v4::SlidingSyncRoom { + name: services() + .rooms + .state_accessor + .get_name(room_id)? + .or(name), + avatar: if let Some(avatar) = avatar { + JsOption::Some(avatar) + } else { + match services().rooms.state_accessor.get_avatar(room_id)? 
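+                    // `JsOption` is tri-state: an avatar URL, an avatar
+                    // explicitly set to null, or no avatar event at all;
+                    // the match keeps that distinction while projecting
+                    // the event content down to its `url` field.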
{ + JsOption::Some(avatar) => { + JsOption::from_option(avatar.url) + } + JsOption::Null => JsOption::Null, + JsOption::Undefined => JsOption::Undefined, + } + }, + initial: Some(roomsince == &0), + is_dm: None, + invite_state: None, + unread_notifications: UnreadNotificationsCount { + highlight_count: Some( + services() + .rooms + .user + .highlight_count(&sender_user, room_id)? + .try_into() + .expect("notification count can't go that high"), + ), + notification_count: Some( + services() + .rooms + .user + .notification_count(&sender_user, room_id)? + .try_into() + .expect("notification count can't go that high"), + ), + }, + timeline: room_events, + required_state, + prev_batch, + limited, + joined_count: Some( + services() + .rooms + .state_cache + .room_joined_count(room_id)? + .map(UInt::new_saturating) + .unwrap_or(uint!(0)), + ), + invited_count: Some( + services() + .rooms + .state_cache + .room_invited_count(room_id)? + .map(UInt::new_saturating) + .unwrap_or(uint!(0)), + ), + // Count events in timeline greater than global sync counter + num_live: None, + timestamp: None, + // TODO + heroes: None, + }, + ); + } + + if rooms + .iter() + .all(|(_, r)| r.timeline.is_empty() && r.required_state.is_empty()) + { + // Hang a few seconds so requests are not spammed + // Stop hanging if new info arrives + let mut duration = body.timeout.unwrap_or(Duration::from_secs(30)); + if duration.as_secs() > 30 { + duration = Duration::from_secs(30); + } + match tokio::time::timeout(duration, watcher).await { + Ok(x) => x.expect("watcher should succeed"), + Err(error) => debug!(%error, "Timed out"), + }; + } + + Ok(Ra(sync_events::v4::Response { + initial: globalsince == 0, + txn_id: body.txn_id.clone(), + pos: next_batch.to_string(), + lists, + rooms, + extensions: sync_events::v4::Extensions { + to_device: body + .extensions + .to_device + .enabled + .unwrap_or(false) + .then(|| { + services() + .users + .get_to_device_events(&sender_user, &sender_device) + .map(|events| sync_events::v4::ToDevice { + events, + next_batch: next_batch.to_string(), + }) + }) + .transpose()?, + e2ee: sync_events::v4::E2EE { + device_lists: DeviceLists { + changed: device_list_changes.into_iter().collect(), + left: device_list_left.into_iter().collect(), + }, + device_one_time_keys_count: services() + .users + .count_one_time_keys(&sender_user, &sender_device)?, + // Fallback keys are not yet supported + device_unused_fallback_key_types: None, + }, + account_data: sync_events::v4::AccountData { + global: if body.extensions.account_data.enabled.unwrap_or(false) + { + services() + .account_data + .global_changes_since(&sender_user, globalsince)? 
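+                        // Re-wrap each changed global event into a raw
+                        // `{"type": .., "content": ..}` envelope for the
+                        // response.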
+ .into_iter() + .map(|(event_type, content)| { + account_data::raw_global_event_from_parts( + &event_type, + &content, + ) + }) + .collect() + } else { + Vec::new() + }, + rooms: BTreeMap::new(), + }, + receipts: sync_events::v4::Receipts { + rooms: BTreeMap::new(), + }, + typing: sync_events::v4::Typing { + rooms: BTreeMap::new(), + }, + }, + delta_token: None, + })) +} diff --git a/src/api/well_known.rs b/src/api/well_known.rs index edb5a347..44210e55 100644 --- a/src/api/well_known.rs +++ b/src/api/well_known.rs @@ -37,5 +37,14 @@ pub(crate) async fn client(_: Ar) -> Ra { Ra(client::Response { homeserver: client::HomeserverInfo::new(base_url.clone()), identity_server: None, + sliding_sync_proxy: services() + .globals + .config + .server_discovery + .client + .advertise_sliding_sync + .then_some(client::SlidingSyncProxyInfo { + url: base_url, + }), }) } diff --git a/src/cli/serve.rs b/src/cli/serve.rs index f0e0f969..7a528748 100644 --- a/src/cli/serve.rs +++ b/src/cli/serve.rs @@ -651,6 +651,7 @@ fn client_routes() -> Router { .ruma_route(c2s::get_state_events_route) .ruma_route(c2s::get_state_events_for_key_route) .ruma_route(c2s::v3::sync_events_route) + .ruma_route(c2s::msc3575::sync_events_v4_route) .ruma_route(c2s::get_context_route) .ruma_route(c2s::get_message_events_route) .ruma_route(c2s::search_events_route) diff --git a/src/config.rs b/src/config.rs index 5924d232..b597e4a9 100644 --- a/src/config.rs +++ b/src/config.rs @@ -151,6 +151,9 @@ pub(crate) struct ServerServerDiscovery { pub(crate) struct ClientServerDiscovery { /// The base URL to make client-server API requests to pub(crate) base_url: Url, + + #[serde(default, rename = "advertise_buggy_sliding_sync")] + pub(crate) advertise_sliding_sync: bool, } #[derive(Debug, Deserialize)] diff --git a/src/service/rooms/state_accessor.rs b/src/service/rooms/state_accessor.rs index 78af94ed..3c859a01 100644 --- a/src/service/rooms/state_accessor.rs +++ b/src/service/rooms/state_accessor.rs @@ -7,6 +7,7 @@ use lru_cache::LruCache; use ruma::{ events::{ room::{ + avatar::RoomAvatarEventContent, history_visibility::{ HistoryVisibility, RoomHistoryVisibilityEventContent, }, @@ -17,8 +18,8 @@ use ruma::{ StateEventType, }, state_res::Event, - EventId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, - UserId, + EventId, JsOption, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, + ServerName, UserId, }; use serde_json::value::to_raw_value; use tracing::{error, warn}; @@ -507,6 +508,23 @@ impl Service { ) } + #[tracing::instrument(skip(self))] + pub(crate) fn get_avatar( + &self, + room_id: &RoomId, + ) -> Result> { + self.room_state_get(room_id, &StateEventType::RoomAvatar, "")?.map_or( + Ok(JsOption::Undefined), + |s| { + serde_json::from_str(s.content.get()).map_err(|_| { + Error::bad_database( + "Invalid room avatar event in database.", + ) + }) + }, + ) + } + // Allowed because this function uses `services()` #[allow(clippy::unused_self)] #[tracing::instrument(skip(self), ret(level = "trace"))] @@ -535,6 +553,24 @@ impl Service { .is_ok() } + #[tracing::instrument(skip(self))] + pub(crate) fn get_member( + &self, + room_id: &RoomId, + user_id: &UserId, + ) -> Result> { + self.room_state_get( + room_id, + &StateEventType::RoomMember, + user_id.as_str(), + )? 
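+        // A missing member event simply means the user has no membership
+        // in this room (`Ok(None)`); a present but unparsable one is
+        // reported as database corruption.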
+ .map_or(Ok(None), |s| { + serde_json::from_str(s.content.get()).map_err(|_| { + Error::bad_database("Invalid room member event in database.") + }) + }) + } + /// Checks if a given user can redact a given event /// /// If `federation` is `true`, it allows redaction events from any user of diff --git a/src/service/users.rs b/src/service/users.rs index dbacb715..9df6e835 100644 --- a/src/service/users.rs +++ b/src/service/users.rs @@ -1,12 +1,23 @@ -use std::{collections::BTreeMap, mem}; +use std::{ + collections::{BTreeMap, BTreeSet}, + mem, + sync::{Arc, Mutex}, +}; use ruma::{ - api::client::{device::Device, filter::FilterDefinition}, + api::client::{ + device::Device, + filter::FilterDefinition, + sync::sync_events::{ + self, + v4::{ExtensionsConfig, SyncRequestList}, + }, + }, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::AnyToDeviceEvent, serde::Raw, DeviceId, OneTimeKeyAlgorithm, OneTimeKeyName, OwnedDeviceId, OwnedKeyId, - OwnedMxcUri, OwnedOneTimeKeyId, OwnedUserId, UInt, UserId, + OwnedMxcUri, OwnedOneTimeKeyId, OwnedRoomId, OwnedUserId, UInt, UserId, }; use crate::{services, Error, Result}; @@ -15,14 +26,30 @@ mod data; pub(crate) use data::Data; +pub(crate) struct SlidingSyncCache { + lists: BTreeMap, + subscriptions: BTreeMap, + // For every room, the roomsince number + known_rooms: BTreeMap>, + extensions: ExtensionsConfig, +} + pub(crate) struct Service { pub(crate) db: &'static dyn Data, + #[allow(clippy::type_complexity)] + pub(crate) connections: Mutex< + BTreeMap< + (OwnedUserId, OwnedDeviceId, String), + Arc>, + >, + >, } impl Service { pub(crate) fn new(db: &'static dyn Data) -> Self { Self { db, + connections: Mutex::new(BTreeMap::new()), } } @@ -31,6 +58,206 @@ impl Service { self.db.exists(user_id) } + pub(crate) fn forget_sync_request_connection( + &self, + user_id: OwnedUserId, + device_id: OwnedDeviceId, + conn_id: String, + ) { + self.connections.lock().unwrap().remove(&(user_id, device_id, conn_id)); + } + + #[allow(clippy::too_many_lines)] + pub(crate) fn update_sync_request_with_cache( + &self, + user_id: OwnedUserId, + device_id: OwnedDeviceId, + request: &mut sync_events::v4::Request, + ) -> BTreeMap> { + let Some(conn_id) = request.conn_id.clone() else { + return BTreeMap::new(); + }; + + let mut cache = self.connections.lock().unwrap(); + let cached = Arc::clone( + cache.entry((user_id, device_id, conn_id)).or_insert_with(|| { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), + })) + }), + ); + let cached = &mut cached.lock().unwrap(); + drop(cache); + + for (list_id, list) in &mut request.lists { + if let Some(cached_list) = cached.lists.get(list_id) { + if list.sort.is_empty() { + list.sort.clone_from(&cached_list.sort); + } + if list.room_details.required_state.is_empty() { + list.room_details + .required_state + .clone_from(&cached_list.room_details.required_state); + } + list.room_details.timeline_limit = list + .room_details + .timeline_limit + .or(cached_list.room_details.timeline_limit); + list.include_old_rooms = list + .include_old_rooms + .clone() + .or(cached_list.include_old_rooms.clone()); + match (&mut list.filters, cached_list.filters.clone()) { + (Some(list_filters), Some(cached_filters)) => { + list_filters.is_dm = + list_filters.is_dm.or(cached_filters.is_dm); + if list_filters.spaces.is_empty() { + list_filters.spaces = cached_filters.spaces; + } + list_filters.is_encrypted = list_filters + 
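+                        // Like the surrounding merges, this implements
+                        // MSC3575 "sticky" parameters: fields omitted from
+                        // a request fall back to the values cached from
+                        // earlier requests on the same connection.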
.is_encrypted + .or(cached_filters.is_encrypted); + list_filters.is_invite = + list_filters.is_invite.or(cached_filters.is_invite); + if list_filters.room_types.is_empty() { + list_filters.room_types = cached_filters.room_types; + } + if list_filters.not_room_types.is_empty() { + list_filters.not_room_types = + cached_filters.not_room_types; + } + list_filters.room_name_like = list_filters + .room_name_like + .clone() + .or(cached_filters.room_name_like); + if list_filters.tags.is_empty() { + list_filters.tags = cached_filters.tags; + } + if list_filters.not_tags.is_empty() { + list_filters.not_tags = cached_filters.not_tags; + } + } + (_, Some(cached_filters)) => { + list.filters = Some(cached_filters); + } + (Some(list_filters), _) => { + list.filters = Some(list_filters.clone()); + } + (..) => {} + } + if list.bump_event_types.is_empty() { + list.bump_event_types + .clone_from(&cached_list.bump_event_types); + } + } + cached.lists.insert(list_id.clone(), list.clone()); + } + + cached.subscriptions.extend( + request + .room_subscriptions + .iter() + .map(|(k, v)| (k.clone(), v.clone())), + ); + request.room_subscriptions.extend( + cached.subscriptions.iter().map(|(k, v)| (k.clone(), v.clone())), + ); + + request.extensions.e2ee.enabled = + request.extensions.e2ee.enabled.or(cached.extensions.e2ee.enabled); + + request.extensions.to_device.enabled = request + .extensions + .to_device + .enabled + .or(cached.extensions.to_device.enabled); + + request.extensions.account_data.enabled = request + .extensions + .account_data + .enabled + .or(cached.extensions.account_data.enabled); + request.extensions.account_data.lists = request + .extensions + .account_data + .lists + .clone() + .or(cached.extensions.account_data.lists.clone()); + request.extensions.account_data.rooms = request + .extensions + .account_data + .rooms + .clone() + .or(cached.extensions.account_data.rooms.clone()); + + cached.extensions = request.extensions.clone(); + + cached.known_rooms.clone() + } + + pub(crate) fn update_sync_subscriptions( + &self, + user_id: OwnedUserId, + device_id: OwnedDeviceId, + conn_id: String, + subscriptions: BTreeMap, + ) { + let mut cache = self.connections.lock().unwrap(); + let cached = Arc::clone( + cache.entry((user_id, device_id, conn_id)).or_insert_with(|| { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), + })) + }), + ); + let cached = &mut cached.lock().unwrap(); + drop(cache); + + cached.subscriptions = subscriptions; + } + + pub(crate) fn update_sync_known_rooms( + &self, + user_id: OwnedUserId, + device_id: OwnedDeviceId, + conn_id: String, + list_id: String, + new_cached_rooms: BTreeSet, + globalsince: u64, + ) { + let mut cache = self.connections.lock().unwrap(); + let cached = Arc::clone( + cache.entry((user_id, device_id, conn_id)).or_insert_with(|| { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), + })) + }), + ); + let cached = &mut cached.lock().unwrap(); + drop(cache); + + for (roomid, lastsince) in + cached.known_rooms.entry(list_id.clone()).or_default().iter_mut() + { + if !new_cached_rooms.contains(roomid) { + *lastsince = 0; + } + } + let list = cached.known_rooms.entry(list_id).or_default(); + for roomid in new_cached_rooms { + list.insert(roomid, globalsince); + } + } + /// Check if account is deactivated pub(crate) fn 
is_deactivated(&self, user_id: &UserId) -> Result { self.db.is_deactivated(user_id) From 6d4f165629c292b11b1519edef9f5727c7c04207 Mon Sep 17 00:00:00 2001 From: Lambda Date: Sun, 10 Aug 2025 11:27:02 +0000 Subject: [PATCH 04/33] MSC3575: use explicit loop for ops creation --- src/api/client_server/sync/msc3575.rs | 105 ++++++++++++-------------- 1 file changed, 50 insertions(+), 55 deletions(-) diff --git a/src/api/client_server/sync/msc3575.rs b/src/api/client_server/sync/msc3575.rs index 31333eaf..e1ede5de 100644 --- a/src/api/client_server/sync/msc3575.rs +++ b/src/api/client_server/sync/msc3575.rs @@ -305,64 +305,59 @@ pub(crate) async fn sync_events_v4_route( let mut new_known_rooms = BTreeSet::new(); + let mut ops = Vec::new(); + for (mut from, mut to) in list.ranges { + from = from.clamp( + uint!(0), + UInt::try_from(all_joined_rooms.len() - 1).unwrap_or(UInt::MAX), + ); + to = to.clamp( + from, + UInt::try_from(all_joined_rooms.len() - 1).unwrap_or(UInt::MAX), + ); + let room_ids = + all_joined_rooms[from.try_into().unwrap_or(usize::MAX) + ..=to.try_into().unwrap_or(usize::MAX)] + .to_vec(); + + new_known_rooms.extend(room_ids.iter().cloned()); + for room_id in &room_ids { + let todo_room = todo_rooms.entry(room_id.clone()).or_insert(( + BTreeSet::new(), + 0, + u64::MAX, + )); + let limit = list + .room_details + .timeline_limit + .map_or(10, u64::from) + .min(100); + todo_room + .0 + .extend(list.room_details.required_state.iter().cloned()); + todo_room.1 = todo_room.1.max(limit); + // 0 means unknown because it got out of date + todo_room.2 = todo_room.2.min( + known_rooms + .get(&list_id) + .and_then(|k| k.get(room_id)) + .copied() + .unwrap_or(0), + ); + } + ops.push(sync_events::v4::SyncOp { + op: SlidingOp::Sync, + range: Some((from, to)), + index: None, + room_ids, + room_id: None, + }); + } + lists.insert( list_id.clone(), sync_events::v4::SyncList { - ops: list - .ranges - .into_iter() - .map(|mut r| { - r.0 = r.0.clamp( - uint!(0), - UInt::try_from(all_joined_rooms.len() - 1) - .unwrap_or(UInt::MAX), - ); - r.1 = r.1.clamp( - r.0, - UInt::try_from(all_joined_rooms.len() - 1) - .unwrap_or(UInt::MAX), - ); - let room_ids = all_joined_rooms[r - .0 - .try_into() - .unwrap_or(usize::MAX) - ..=r.1.try_into().unwrap_or(usize::MAX)] - .to_vec(); - new_known_rooms.extend(room_ids.iter().cloned()); - for room_id in &room_ids { - let todo_room = todo_rooms - .entry(room_id.clone()) - .or_insert((BTreeSet::new(), 0, u64::MAX)); - let limit = list - .room_details - .timeline_limit - .map_or(10, u64::from) - .min(100); - todo_room.0.extend( - list.room_details - .required_state - .iter() - .cloned(), - ); - todo_room.1 = todo_room.1.max(limit); - // 0 means unknown because it got out of date - todo_room.2 = todo_room.2.min( - known_rooms - .get(&list_id) - .and_then(|k| k.get(room_id)) - .copied() - .unwrap_or(0), - ); - } - sync_events::v4::SyncOp { - op: SlidingOp::Sync, - range: Some(r), - index: None, - room_ids, - room_id: None, - } - }) - .collect(), + ops, count: UInt::try_from(all_joined_rooms.len()) .unwrap_or(UInt::MAX), }, From e75fe7b75ac8dea2b39f83b3142c183872d8971a Mon Sep 17 00:00:00 2001 From: Lambda Date: Sun, 10 Aug 2025 11:41:24 +0000 Subject: [PATCH 05/33] MSC3575: factor out TodoRoom --- src/api/client_server/sync/msc3575.rs | 92 +++++++++++++++------------ 1 file changed, 52 insertions(+), 40 deletions(-) diff --git a/src/api/client_server/sync/msc3575.rs b/src/api/client_server/sync/msc3575.rs index e1ede5de..7d748e42 100644 --- 
a/src/api/client_server/sync/msc3575.rs +++ b/src/api/client_server/sync/msc3575.rs @@ -19,7 +19,7 @@ use ruma::{ room::member::{MembershipState, RoomMemberEventContent}, StateEventType, TimelineEventType, }, - uint, JsOption, UInt, UserId, + uint, JsOption, OwnedRoomId, RoomId, UInt, UserId, }; use tracing::{debug, error}; @@ -29,6 +29,38 @@ use crate::{ services, Ar, Error, Ra, Result, }; +struct TodoRoom { + required_state_request: BTreeSet<(StateEventType, String)>, + timeline_limit: u64, + roomsince: u64, +} +impl TodoRoom { + fn update( + &mut self, + required_state: Vec<(StateEventType, String)>, + timeline_limit: UInt, + known_rooms: Option<&BTreeMap>, + room_id: &RoomId, + ) { + self.required_state_request.extend(required_state); + self.timeline_limit = + self.timeline_limit.max(u64::from(timeline_limit).min(100)); + // 0 means unknown because it got out of date + self.roomsince = self.roomsince.min( + known_rooms.and_then(|k| k.get(room_id)).copied().unwrap_or(0), + ); + } +} +impl Default for TodoRoom { + fn default() -> Self { + Self { + required_state_request: BTreeSet::new(), + timeline_limit: 0, + roomsince: u64::MAX, + } + } +} + #[allow(clippy::too_many_lines)] pub(crate) async fn sync_events_v4_route( body: Ar, @@ -296,7 +328,7 @@ pub(crate) async fn sync_events_v4_route( let mut lists = BTreeMap::new(); // and required state - let mut todo_rooms = BTreeMap::new(); + let mut todo_rooms: BTreeMap = BTreeMap::new(); for (list_id, list) in body.lists { if list.filters.and_then(|f| f.is_invite).unwrap_or(false) { @@ -322,27 +354,11 @@ pub(crate) async fn sync_events_v4_route( new_known_rooms.extend(room_ids.iter().cloned()); for room_id in &room_ids { - let todo_room = todo_rooms.entry(room_id.clone()).or_insert(( - BTreeSet::new(), - 0, - u64::MAX, - )); - let limit = list - .room_details - .timeline_limit - .map_or(10, u64::from) - .min(100); - todo_room - .0 - .extend(list.room_details.required_state.iter().cloned()); - todo_room.1 = todo_room.1.max(limit); - // 0 means unknown because it got out of date - todo_room.2 = todo_room.2.min( - known_rooms - .get(&list_id) - .and_then(|k| k.get(room_id)) - .copied() - .unwrap_or(0), + todo_rooms.entry(room_id.clone()).or_default().update( + list.room_details.required_state.clone(), + list.room_details.timeline_limit.unwrap_or(uint!(10)), + known_rooms.get(&list_id), + room_id, ); } ops.push(sync_events::v4::SyncOp { @@ -380,21 +396,11 @@ pub(crate) async fn sync_events_v4_route( if !services().rooms.metadata.exists(room_id)? 
{ continue; } - let todo_room = todo_rooms.entry(room_id.clone()).or_insert(( - BTreeSet::new(), - 0, - u64::MAX, - )); - let limit = room.timeline_limit.map_or(10, u64::from).min(100); - todo_room.0.extend(room.required_state.iter().cloned()); - todo_room.1 = todo_room.1.max(limit); - // 0 means unknown because it got out of date - todo_room.2 = todo_room.2.min( - known_rooms - .get("subscriptions") - .and_then(|k| k.get(room_id)) - .copied() - .unwrap_or(0), + todo_rooms.entry(room_id.clone()).or_default().update( + room.required_state.clone(), + room.timeline_limit.unwrap_or(uint!(10)), + known_rooms.get("subscriptions"), + room_id, ); known_subscription_rooms.insert(room_id.clone()); } @@ -425,8 +431,14 @@ pub(crate) async fn sync_events_v4_route( } let mut rooms = BTreeMap::new(); - for (room_id, (required_state_request, timeline_limit, roomsince)) in - &todo_rooms + for ( + room_id, + TodoRoom { + required_state_request, + timeline_limit, + roomsince, + }, + ) in &todo_rooms { let roomsincecount = PduCount::Normal(*roomsince); From 3bbee92db483a016841c388712b40105fae99ab2 Mon Sep 17 00:00:00 2001 From: Lambda Date: Sun, 10 Aug 2025 16:26:44 +0000 Subject: [PATCH 06/33] MSC3575: remove useless Ok-wrapping --- src/api/client_server/sync/msc3575.rs | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/src/api/client_server/sync/msc3575.rs b/src/api/client_server/sync/msc3575.rs index 7d748e42..55ed37a8 100644 --- a/src/api/client_server/sync/msc3575.rs +++ b/src/api/client_server/sync/msc3575.rs @@ -455,15 +455,13 @@ pub(crate) async fn sync_events_v4_route( let prev_batch = timeline_pdus .first() - .map_or(Ok::<_, Error>(None), |(pdu_count, _)| { - Ok(Some(match pdu_count { - PduCount::Backfilled(_) => { - error!("Timeline in backfill state?!"); - "0".to_owned() - } - PduCount::Normal(c) => c.to_string(), - })) - })? 
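+        // The closure is infallible, so a plain `map` replaces the old
+        // `map_or(Ok(..), ..)?` round-trip through `Result`.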
+ .map(|(pdu_count, _)| match pdu_count { + PduCount::Backfilled(_) => { + error!("Timeline in backfill state?!"); + "0".to_owned() + } + PduCount::Normal(c) => c.to_string(), + }) .or_else(|| (roomsince != &0).then(|| roomsince.to_string())); let room_events: Vec<_> = timeline_pdus From 991f1e2c0d9c14f80b2c4e3cf4338722e528f78f Mon Sep 17 00:00:00 2001 From: Lambda Date: Sun, 10 Aug 2025 17:25:47 +0000 Subject: [PATCH 07/33] MSC3575: factor out ConnectionKey --- src/api/client_server/sync/msc3575.rs | 47 ++++++-------- src/service/users.rs | 88 +++++++++------------------ 2 files changed, 50 insertions(+), 85 deletions(-) diff --git a/src/api/client_server/sync/msc3575.rs b/src/api/client_server/sync/msc3575.rs index 55ed37a8..ed8fafe3 100644 --- a/src/api/client_server/sync/msc3575.rs +++ b/src/api/client_server/sync/msc3575.rs @@ -25,7 +25,7 @@ use tracing::{debug, error}; use super::{load_timeline, share_encrypted_room}; use crate::{ - service::{account_data, rooms::timeline::PduCount}, + service::{account_data, rooms::timeline::PduCount, users::ConnectionKey}, services, Ar, Error, Ra, Result, }; @@ -76,22 +76,22 @@ pub(crate) async fn sync_events_v4_route( let globalsince = body.pos.as_ref().and_then(|string| string.parse().ok()).unwrap_or(0); + let connection_key = ConnectionKey { + user: sender_user.clone(), + device: sender_device.clone(), + connection: body.conn_id.clone(), + }; + if globalsince == 0 { - if let Some(conn_id) = &body.conn_id { - services().users.forget_sync_request_connection( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - ); + if body.conn_id.is_some() { + services().users.forget_sync_request_connection(&connection_key); } } // Get sticky parameters from cache - let known_rooms = services().users.update_sync_request_with_cache( - sender_user.clone(), - sender_device.clone(), - &mut body, - ); + let known_rooms = services() + .users + .update_sync_request_with_cache(connection_key.clone(), &mut body); let all_joined_rooms = services() .rooms @@ -379,11 +379,9 @@ pub(crate) async fn sync_events_v4_route( }, ); - if let Some(conn_id) = &body.conn_id { + if body.conn_id.is_some() { services().users.update_sync_known_rooms( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), + connection_key.clone(), list_id, new_known_rooms, globalsince, @@ -410,24 +408,19 @@ pub(crate) async fn sync_events_v4_route( body.room_subscriptions.remove(&r); } - if let Some(conn_id) = &body.conn_id { + if body.conn_id.is_some() { services().users.update_sync_known_rooms( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), + connection_key.clone(), "subscriptions".to_owned(), known_subscription_rooms, globalsince, ); } - if let Some(conn_id) = &body.conn_id { - services().users.update_sync_subscriptions( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - body.room_subscriptions, - ); + if body.conn_id.is_some() { + services() + .users + .update_sync_subscriptions(connection_key, body.room_subscriptions); } let mut rooms = BTreeMap::new(); diff --git a/src/service/users.rs b/src/service/users.rs index 9df6e835..a30de193 100644 --- a/src/service/users.rs +++ b/src/service/users.rs @@ -26,6 +26,7 @@ mod data; pub(crate) use data::Data; +#[derive(Debug, Default)] pub(crate) struct SlidingSyncCache { lists: BTreeMap, subscriptions: BTreeMap, @@ -34,15 +35,18 @@ pub(crate) struct SlidingSyncCache { extensions: ExtensionsConfig, } +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] +pub(crate) struct ConnectionKey { + 
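+    // Sliding-sync server state is cached per (user, device, connection).
+    // The connection id is chosen by the client; `None` covers requests
+    // that send no `conn_id`.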
pub(crate) user: OwnedUserId,
+    pub(crate) device: OwnedDeviceId,
+    pub(crate) connection: Option<String>,
+}
+
 pub(crate) struct Service {
     pub(crate) db: &'static dyn Data,
     #[allow(clippy::type_complexity)]
-    pub(crate) connections: Mutex<
-        BTreeMap<
-            (OwnedUserId, OwnedDeviceId, String),
-            Arc<Mutex<SlidingSyncCache>>,
-        >,
-    >,
+    pub(crate) connections:
+        Mutex<BTreeMap<ConnectionKey, Arc<Mutex<SlidingSyncCache>>>>,
 }
 
 impl Service {
@@ -53,6 +57,14 @@ impl Service {
         }
     }
 
+    fn get_cache_entry(
+        &self,
+        key: ConnectionKey,
+    ) -> Arc<Mutex<SlidingSyncCache>> {
+        let mut cache = self.connections.lock().unwrap();
+        Arc::clone(cache.entry(key).or_default())
+    }
+
     /// Check if a user has an account on this homeserver.
     pub(crate) fn exists(&self, user_id: &UserId) -> Result<bool> {
         self.db.exists(user_id)
@@ -60,37 +72,23 @@ impl Service {
 
     pub(crate) fn forget_sync_request_connection(
         &self,
-        user_id: OwnedUserId,
-        device_id: OwnedDeviceId,
-        conn_id: String,
+        connection_key: &ConnectionKey,
     ) {
-        self.connections.lock().unwrap().remove(&(user_id, device_id, conn_id));
+        self.connections.lock().unwrap().remove(connection_key);
     }
 
     #[allow(clippy::too_many_lines)]
     pub(crate) fn update_sync_request_with_cache(
         &self,
-        user_id: OwnedUserId,
-        device_id: OwnedDeviceId,
+        connection_key: ConnectionKey,
         request: &mut sync_events::v4::Request,
     ) -> BTreeMap<String, BTreeMap<OwnedRoomId, u64>> {
-        let Some(conn_id) = request.conn_id.clone() else {
+        if connection_key.connection.is_none() {
            return BTreeMap::new();
        };
 
-        let mut cache = self.connections.lock().unwrap();
-        let cached = Arc::clone(
-            cache.entry((user_id, device_id, conn_id)).or_insert_with(|| {
-                Arc::new(Mutex::new(SlidingSyncCache {
-                    lists: BTreeMap::new(),
-                    subscriptions: BTreeMap::new(),
-                    known_rooms: BTreeMap::new(),
-                    extensions: ExtensionsConfig::default(),
-                }))
-            }),
-        );
-        let cached = &mut cached.lock().unwrap();
-        drop(cache);
+        let cached = self.get_cache_entry(connection_key);
+        let mut cached = cached.lock().unwrap();
 
         for (list_id, list) in &mut request.lists {
             if let Some(cached_list) = cached.lists.get(list_id) {
@@ -200,50 +198,24 @@ impl Service {
 
     pub(crate) fn update_sync_subscriptions(
         &self,
-        user_id: OwnedUserId,
-        device_id: OwnedDeviceId,
-        conn_id: String,
+        connection_key: ConnectionKey,
         subscriptions: BTreeMap<OwnedRoomId, sync_events::v4::RoomSubscription>,
     ) {
-        let mut cache = self.connections.lock().unwrap();
-        let cached = Arc::clone(
-            cache.entry((user_id, device_id, conn_id)).or_insert_with(|| {
-                Arc::new(Mutex::new(SlidingSyncCache {
-                    lists: BTreeMap::new(),
-                    subscriptions: BTreeMap::new(),
-                    known_rooms: BTreeMap::new(),
-                    extensions: ExtensionsConfig::default(),
-                }))
-            }),
-        );
-        let cached = &mut cached.lock().unwrap();
-        drop(cache);
+        let cached = self.get_cache_entry(connection_key);
+        let mut cached = cached.lock().unwrap();
 
         cached.subscriptions = subscriptions;
     }
 
     pub(crate) fn update_sync_known_rooms(
         &self,
-        user_id: OwnedUserId,
-        device_id: OwnedDeviceId,
-        conn_id: String,
+        connection_key: ConnectionKey,
         list_id: String,
         new_cached_rooms: BTreeSet<OwnedRoomId>,
         globalsince: u64,
     ) {
-        let mut cache = self.connections.lock().unwrap();
-        let cached = Arc::clone(
-            cache.entry((user_id, device_id, conn_id)).or_insert_with(|| {
-                Arc::new(Mutex::new(SlidingSyncCache {
-                    lists: BTreeMap::new(),
-                    subscriptions: BTreeMap::new(),
-                    known_rooms: BTreeMap::new(),
-                    extensions: ExtensionsConfig::default(),
-                }))
-            }),
-        );
-        let cached = &mut cached.lock().unwrap();
-        drop(cache);
+        let cached = self.get_cache_entry(connection_key);
+        let mut cached = cached.lock().unwrap();
 
         for (roomid, lastsince) in
             cached.known_rooms.entry(list_id.clone()).or_default().iter_mut()

From 
64ad940bad1503794c448942630a487bbdc6bddc Mon Sep 17 00:00:00 2001 From: Lambda Date: Sun, 10 Aug 2025 18:30:00 +0000 Subject: [PATCH 08/33] MSC3575: remove sticky parameters --- src/api/client_server/sync/msc3575.rs | 15 +-- src/service/users.rs | 133 +------------------------- 2 files changed, 6 insertions(+), 142 deletions(-) diff --git a/src/api/client_server/sync/msc3575.rs b/src/api/client_server/sync/msc3575.rs index ed8fafe3..63ded1ae 100644 --- a/src/api/client_server/sync/msc3575.rs +++ b/src/api/client_server/sync/msc3575.rs @@ -67,7 +67,7 @@ pub(crate) async fn sync_events_v4_route( ) -> Result, Ra> { let sender_user = body.sender_user.expect("user is authenticated"); let sender_device = body.sender_device.expect("user is authenticated"); - let mut body = body.body; + let body = body.body; // Setup watchers, so if there's no response, we can wait for them let watcher = services().globals.watch(&sender_user, &sender_device); @@ -88,10 +88,8 @@ pub(crate) async fn sync_events_v4_route( } } - // Get sticky parameters from cache - let known_rooms = services() - .users - .update_sync_request_with_cache(connection_key.clone(), &mut body); + let known_rooms = + services().users.get_rooms_in_connection(connection_key.clone()); let all_joined_rooms = services() .rooms @@ -405,7 +403,6 @@ pub(crate) async fn sync_events_v4_route( for r in body.unsubscribe_rooms { known_subscription_rooms.remove(&r); - body.room_subscriptions.remove(&r); } if body.conn_id.is_some() { @@ -417,12 +414,6 @@ pub(crate) async fn sync_events_v4_route( ); } - if body.conn_id.is_some() { - services() - .users - .update_sync_subscriptions(connection_key, body.room_subscriptions); - } - let mut rooms = BTreeMap::new(); for ( room_id, diff --git a/src/service/users.rs b/src/service/users.rs index a30de193..ac57b71d 100644 --- a/src/service/users.rs +++ b/src/service/users.rs @@ -5,14 +5,7 @@ use std::{ }; use ruma::{ - api::client::{ - device::Device, - filter::FilterDefinition, - sync::sync_events::{ - self, - v4::{ExtensionsConfig, SyncRequestList}, - }, - }, + api::client::{device::Device, filter::FilterDefinition}, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::AnyToDeviceEvent, serde::Raw, @@ -28,11 +21,7 @@ pub(crate) use data::Data; #[derive(Debug, Default)] pub(crate) struct SlidingSyncCache { - lists: BTreeMap, - subscriptions: BTreeMap, - // For every room, the roomsince number known_rooms: BTreeMap>, - extensions: ExtensionsConfig, } #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] @@ -77,136 +66,20 @@ impl Service { self.connections.lock().unwrap().remove(connection_key); } - #[allow(clippy::too_many_lines)] - pub(crate) fn update_sync_request_with_cache( + pub(crate) fn get_rooms_in_connection( &self, connection_key: ConnectionKey, - request: &mut sync_events::v4::Request, ) -> BTreeMap> { if connection_key.connection.is_none() { return BTreeMap::new(); }; let cached = self.get_cache_entry(connection_key); - let mut cached = cached.lock().unwrap(); - - for (list_id, list) in &mut request.lists { - if let Some(cached_list) = cached.lists.get(list_id) { - if list.sort.is_empty() { - list.sort.clone_from(&cached_list.sort); - } - if list.room_details.required_state.is_empty() { - list.room_details - .required_state - .clone_from(&cached_list.room_details.required_state); - } - list.room_details.timeline_limit = list - .room_details - .timeline_limit - .or(cached_list.room_details.timeline_limit); - list.include_old_rooms = list - .include_old_rooms - .clone() - 
.or(cached_list.include_old_rooms.clone()); - match (&mut list.filters, cached_list.filters.clone()) { - (Some(list_filters), Some(cached_filters)) => { - list_filters.is_dm = - list_filters.is_dm.or(cached_filters.is_dm); - if list_filters.spaces.is_empty() { - list_filters.spaces = cached_filters.spaces; - } - list_filters.is_encrypted = list_filters - .is_encrypted - .or(cached_filters.is_encrypted); - list_filters.is_invite = - list_filters.is_invite.or(cached_filters.is_invite); - if list_filters.room_types.is_empty() { - list_filters.room_types = cached_filters.room_types; - } - if list_filters.not_room_types.is_empty() { - list_filters.not_room_types = - cached_filters.not_room_types; - } - list_filters.room_name_like = list_filters - .room_name_like - .clone() - .or(cached_filters.room_name_like); - if list_filters.tags.is_empty() { - list_filters.tags = cached_filters.tags; - } - if list_filters.not_tags.is_empty() { - list_filters.not_tags = cached_filters.not_tags; - } - } - (_, Some(cached_filters)) => { - list.filters = Some(cached_filters); - } - (Some(list_filters), _) => { - list.filters = Some(list_filters.clone()); - } - (..) => {} - } - if list.bump_event_types.is_empty() { - list.bump_event_types - .clone_from(&cached_list.bump_event_types); - } - } - cached.lists.insert(list_id.clone(), list.clone()); - } - - cached.subscriptions.extend( - request - .room_subscriptions - .iter() - .map(|(k, v)| (k.clone(), v.clone())), - ); - request.room_subscriptions.extend( - cached.subscriptions.iter().map(|(k, v)| (k.clone(), v.clone())), - ); - - request.extensions.e2ee.enabled = - request.extensions.e2ee.enabled.or(cached.extensions.e2ee.enabled); - - request.extensions.to_device.enabled = request - .extensions - .to_device - .enabled - .or(cached.extensions.to_device.enabled); - - request.extensions.account_data.enabled = request - .extensions - .account_data - .enabled - .or(cached.extensions.account_data.enabled); - request.extensions.account_data.lists = request - .extensions - .account_data - .lists - .clone() - .or(cached.extensions.account_data.lists.clone()); - request.extensions.account_data.rooms = request - .extensions - .account_data - .rooms - .clone() - .or(cached.extensions.account_data.rooms.clone()); - - cached.extensions = request.extensions.clone(); + let cached = cached.lock().unwrap(); cached.known_rooms.clone() } - pub(crate) fn update_sync_subscriptions( - &self, - connection_key: ConnectionKey, - subscriptions: BTreeMap, - ) { - let cached = self.get_cache_entry(connection_key); - let mut cached = cached.lock().unwrap(); - - cached.subscriptions = subscriptions; - } - pub(crate) fn update_sync_known_rooms( &self, connection_key: ConnectionKey, From ede39370a492267362c4fb0ff8e23db70824002e Mon Sep 17 00:00:00 2001 From: Lambda Date: Sun, 10 Aug 2025 18:53:10 +0000 Subject: [PATCH 09/33] MSC3575: allow empty conn_id --- src/api/client_server/sync/msc3575.rs | 32 +++++++++++---------------- src/service/users.rs | 4 ---- 2 files changed, 13 insertions(+), 23 deletions(-) diff --git a/src/api/client_server/sync/msc3575.rs b/src/api/client_server/sync/msc3575.rs index 63ded1ae..8a98cd92 100644 --- a/src/api/client_server/sync/msc3575.rs +++ b/src/api/client_server/sync/msc3575.rs @@ -83,9 +83,7 @@ pub(crate) async fn sync_events_v4_route( }; if globalsince == 0 { - if body.conn_id.is_some() { - services().users.forget_sync_request_connection(&connection_key); - } + services().users.forget_sync_request_connection(&connection_key); } let known_rooms = @@ -377,14 
+375,12 @@ pub(crate) async fn sync_events_v4_route( }, ); - if body.conn_id.is_some() { - services().users.update_sync_known_rooms( - connection_key.clone(), - list_id, - new_known_rooms, - globalsince, - ); - } + services().users.update_sync_known_rooms( + connection_key.clone(), + list_id, + new_known_rooms, + globalsince, + ); } let mut known_subscription_rooms = BTreeSet::new(); @@ -405,14 +401,12 @@ pub(crate) async fn sync_events_v4_route( known_subscription_rooms.remove(&r); } - if body.conn_id.is_some() { - services().users.update_sync_known_rooms( - connection_key.clone(), - "subscriptions".to_owned(), - known_subscription_rooms, - globalsince, - ); - } + services().users.update_sync_known_rooms( + connection_key.clone(), + "subscriptions".to_owned(), + known_subscription_rooms, + globalsince, + ); let mut rooms = BTreeMap::new(); for ( diff --git a/src/service/users.rs b/src/service/users.rs index ac57b71d..ab64130e 100644 --- a/src/service/users.rs +++ b/src/service/users.rs @@ -70,10 +70,6 @@ impl Service { &self, connection_key: ConnectionKey, ) -> BTreeMap> { - if connection_key.connection.is_none() { - return BTreeMap::new(); - }; - let cached = self.get_cache_entry(connection_key); let cached = cached.lock().unwrap(); From a72ff4f6e6b1e224d4973bee99913e6ee09408df Mon Sep 17 00:00:00 2001 From: Lambda Date: Sun, 10 Aug 2025 19:32:12 +0000 Subject: [PATCH 10/33] MSC3575: avoid double-processing rooms in case of overlapping ranges --- src/api/client_server/sync/msc3575.rs | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/src/api/client_server/sync/msc3575.rs b/src/api/client_server/sync/msc3575.rs index 8a98cd92..918d9f27 100644 --- a/src/api/client_server/sync/msc3575.rs +++ b/src/api/client_server/sync/msc3575.rs @@ -331,7 +331,7 @@ pub(crate) async fn sync_events_v4_route( continue; } - let mut new_known_rooms = BTreeSet::new(); + let mut list_room_ids = BTreeSet::new(); let mut ops = Vec::new(); for (mut from, mut to) in list.ranges { @@ -348,15 +348,7 @@ pub(crate) async fn sync_events_v4_route( ..=to.try_into().unwrap_or(usize::MAX)] .to_vec(); - new_known_rooms.extend(room_ids.iter().cloned()); - for room_id in &room_ids { - todo_rooms.entry(room_id.clone()).or_default().update( - list.room_details.required_state.clone(), - list.room_details.timeline_limit.unwrap_or(uint!(10)), - known_rooms.get(&list_id), - room_id, - ); - } + list_room_ids.extend(room_ids.iter().cloned()); ops.push(sync_events::v4::SyncOp { op: SlidingOp::Sync, range: Some((from, to)), @@ -366,6 +358,15 @@ pub(crate) async fn sync_events_v4_route( }); } + for room_id in &list_room_ids { + todo_rooms.entry(room_id.clone()).or_default().update( + list.room_details.required_state.clone(), + list.room_details.timeline_limit.unwrap_or(uint!(10)), + known_rooms.get(&list_id), + room_id, + ); + } + lists.insert( list_id.clone(), sync_events::v4::SyncList { @@ -378,7 +379,7 @@ pub(crate) async fn sync_events_v4_route( services().users.update_sync_known_rooms( connection_key.clone(), list_id, - new_known_rooms, + list_room_ids, globalsince, ); } From b4f6c88e5793c8794c24d275e8ad7a1848cfb9e8 Mon Sep 17 00:00:00 2001 From: Lambda Date: Sun, 10 Aug 2025 19:00:34 +0000 Subject: [PATCH 11/33] MSC3575: make known rooms global per connection rather than per list --- src/api/client_server/sync/msc3575.rs | 27 ++++++--------------------- src/service/users.rs | 15 +++------------ 2 files changed, 9 insertions(+), 33 deletions(-) diff --git 
a/src/api/client_server/sync/msc3575.rs b/src/api/client_server/sync/msc3575.rs
index 918d9f27..47e93472 100644
--- a/src/api/client_server/sync/msc3575.rs
+++ b/src/api/client_server/sync/msc3575.rs
@@ -39,16 +39,15 @@ impl TodoRoom {
         &mut self,
         required_state: Vec<(StateEventType, String)>,
         timeline_limit: UInt,
-        known_rooms: Option<&BTreeMap<OwnedRoomId, u64>>,
+        known_rooms: &BTreeMap<OwnedRoomId, u64>,
         room_id: &RoomId,
     ) {
         self.required_state_request.extend(required_state);
         self.timeline_limit =
             self.timeline_limit.max(u64::from(timeline_limit).min(100));
         // 0 means unknown because it got out of date
-        self.roomsince = self.roomsince.min(
-            known_rooms.and_then(|k| k.get(room_id)).copied().unwrap_or(0),
-        );
+        self.roomsince =
+            self.roomsince.min(known_rooms.get(room_id).copied().unwrap_or(0));
     }
 }
 impl Default for TodoRoom {
@@ -362,7 +361,7 @@ pub(crate) async fn sync_events_v4_route(
             todo_rooms.entry(room_id.clone()).or_default().update(
                 list.room_details.required_state.clone(),
                 list.room_details.timeline_limit.unwrap_or(uint!(10)),
-                known_rooms.get(&list_id),
+                &known_rooms,
                 room_id,
             );
         }
@@ -375,16 +374,8 @@ pub(crate) async fn sync_events_v4_route(
                     .unwrap_or(UInt::MAX),
             },
         );
-
-        services().users.update_sync_known_rooms(
-            connection_key.clone(),
-            list_id,
-            list_room_ids,
-            globalsince,
-        );
     }
 
-    let mut known_subscription_rooms = BTreeSet::new();
     for (room_id, room) in &body.room_subscriptions {
         if !services().rooms.metadata.exists(room_id)? {
             continue;
@@ -392,20 +383,14 @@ pub(crate) async fn sync_events_v4_route(
         todo_rooms.entry(room_id.clone()).or_default().update(
             room.required_state.clone(),
             room.timeline_limit.unwrap_or(uint!(10)),
-            known_rooms.get("subscriptions"),
+            &known_rooms,
             room_id,
         );
-        known_subscription_rooms.insert(room_id.clone());
-    }
-
-    for r in body.unsubscribe_rooms {
-        known_subscription_rooms.remove(&r);
     }
 
     services().users.update_sync_known_rooms(
         connection_key.clone(),
-        "subscriptions".to_owned(),
-        known_subscription_rooms,
+        todo_rooms.keys().cloned().collect(),
         globalsince,
     );
 
diff --git a/src/service/users.rs b/src/service/users.rs
index ab64130e..928843ae 100644
--- a/src/service/users.rs
+++ b/src/service/users.rs
@@ -21,7 +21,7 @@ pub(crate) use data::Data;
 
 #[derive(Debug, Default)]
 pub(crate) struct SlidingSyncCache {
-    known_rooms: BTreeMap<String, BTreeMap<OwnedRoomId, u64>>,
+    known_rooms: BTreeMap<OwnedRoomId, u64>,
 }
 
 #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
@@ -69,7 +69,7 @@ impl Service {
     pub(crate) fn get_rooms_in_connection(
         &self,
         connection_key: ConnectionKey,
-    ) -> BTreeMap<String, BTreeMap<OwnedRoomId, u64>> {
+    ) -> BTreeMap<OwnedRoomId, u64> {
         let cached = self.get_cache_entry(connection_key);
         let cached = cached.lock().unwrap();
 
@@ -79,23 +79,14 @@ impl Service {
     pub(crate) fn update_sync_known_rooms(
         &self,
         connection_key: ConnectionKey,
-        list_id: String,
         new_cached_rooms: BTreeSet<OwnedRoomId>,
         globalsince: u64,
     ) {
         let cached = self.get_cache_entry(connection_key);
         let mut cached = cached.lock().unwrap();
 
-        for (roomid, lastsince) in
-            cached.known_rooms.entry(list_id.clone()).or_default().iter_mut()
-        {
-            if !new_cached_rooms.contains(roomid) {
-                *lastsince = 0;
-            }
-        }
-        let list = cached.known_rooms.entry(list_id).or_default();
         for roomid in new_cached_rooms {
-            list.insert(roomid, globalsince);
+            cached.known_rooms.insert(roomid, globalsince);
         }
     }
 

From c9a435ab34ce4d888f4859244fd0db9914b87d23 Mon Sep 17 00:00:00 2001
From: Lambda
Date: Sun, 10 Aug 2025 11:04:11 +0000
Subject: [PATCH 12/33] Rename MSC3575 -> MSC4186, remove obsolete config

---
 Cargo.toml | 2 +-
 src/api/client_server/sync.rs | 2 +-
 src/api/client_server/sync/{msc3575.rs =>
msc4186.rs} | 4 ++-- src/api/client_server/unversioned.rs | 1 + src/api/well_known.rs | 9 --------- src/cli/serve.rs | 2 +- src/config.rs | 3 --- 7 files changed, 6 insertions(+), 17 deletions(-) rename src/api/client_server/sync/{msc3575.rs => msc4186.rs} (99%) diff --git a/Cargo.toml b/Cargo.toml index 6cfd3f65..87ee7593 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -165,7 +165,7 @@ features = [ "state-res", "unstable-msc2448", "ring-compat", - "unstable-msc3575", + "unstable-msc4186", ] [target.'cfg(unix)'.dependencies] diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 2a17aa17..a2ed2c86 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -5,7 +5,7 @@ use crate::{ service::rooms::timeline::PduCount, services, Error, PduEvent, Result, }; -pub(crate) mod msc3575; +pub(crate) mod msc4186; pub(crate) mod v3; fn load_timeline( diff --git a/src/api/client_server/sync/msc3575.rs b/src/api/client_server/sync/msc4186.rs similarity index 99% rename from src/api/client_server/sync/msc3575.rs rename to src/api/client_server/sync/msc4186.rs index 47e93472..969853b6 100644 --- a/src/api/client_server/sync/msc3575.rs +++ b/src/api/client_server/sync/msc4186.rs @@ -1,6 +1,6 @@ -//! [MSC3575], aka Sliding Sync, aka Sync v3 (even though the endpoint is called -//! /v4) support +//! [MSC4186], aka Simplified Sliding Sync, aka Simplified [MSC3575], support //! +//! [MSC4186]: https://github.com/matrix-org/matrix-spec-proposals/pull/4186 //! [MSC3575]: https://github.com/matrix-org/matrix-spec-proposals/pull/3575 use std::{ diff --git a/src/api/client_server/unversioned.rs b/src/api/client_server/unversioned.rs index 92ea88ed..5605f53c 100644 --- a/src/api/client_server/unversioned.rs +++ b/src/api/client_server/unversioned.rs @@ -32,6 +32,7 @@ pub(crate) async fn get_supported_versions_route( unstable_features: BTreeMap::from_iter([ ("org.matrix.e2e_cross_signing".to_owned(), true), ("org.matrix.msc3916.stable".to_owned(), true), + ("org.matrix.simplified_msc3575".to_owned(), true), ]), }; diff --git a/src/api/well_known.rs b/src/api/well_known.rs index 44210e55..edb5a347 100644 --- a/src/api/well_known.rs +++ b/src/api/well_known.rs @@ -37,14 +37,5 @@ pub(crate) async fn client(_: Ar) -> Ra { Ra(client::Response { homeserver: client::HomeserverInfo::new(base_url.clone()), identity_server: None, - sliding_sync_proxy: services() - .globals - .config - .server_discovery - .client - .advertise_sliding_sync - .then_some(client::SlidingSyncProxyInfo { - url: base_url, - }), }) } diff --git a/src/cli/serve.rs b/src/cli/serve.rs index 7a528748..be781fe5 100644 --- a/src/cli/serve.rs +++ b/src/cli/serve.rs @@ -651,7 +651,7 @@ fn client_routes() -> Router { .ruma_route(c2s::get_state_events_route) .ruma_route(c2s::get_state_events_for_key_route) .ruma_route(c2s::v3::sync_events_route) - .ruma_route(c2s::msc3575::sync_events_v4_route) + .ruma_route(c2s::msc4186::sync_events_v5_route) .ruma_route(c2s::get_context_route) .ruma_route(c2s::get_message_events_route) .ruma_route(c2s::search_events_route) diff --git a/src/config.rs b/src/config.rs index b597e4a9..5924d232 100644 --- a/src/config.rs +++ b/src/config.rs @@ -151,9 +151,6 @@ pub(crate) struct ServerServerDiscovery { pub(crate) struct ClientServerDiscovery { /// The base URL to make client-server API requests to pub(crate) base_url: Url, - - #[serde(default, rename = "advertise_buggy_sliding_sync")] - pub(crate) advertise_sliding_sync: bool, } #[derive(Debug, Deserialize)] From 
8e94020d4f3f75299d6c43eeb68dddcaece91fb3 Mon Sep 17 00:00:00 2001
From: Lambda
Date: Sun, 10 Aug 2025 11:06:16 +0000
Subject: [PATCH 13/33] SSS: switch from v4 to v5 types

---
 src/api/client_server/sync/msc4186.rs | 55 +++++++++-------------
 1 file changed, 19 insertions(+), 36 deletions(-)

diff --git a/src/api/client_server/sync/msc4186.rs b/src/api/client_server/sync/msc4186.rs
index 969853b6..01a3fe31 100644
--- a/src/api/client_server/sync/msc4186.rs
+++ b/src/api/client_server/sync/msc4186.rs
@@ -10,9 +10,7 @@ use std::{
 use ruma::{
     api::client::{
-        sync::sync_events::{
-            self, v4::SlidingOp, DeviceLists, UnreadNotificationsCount,
-        },
+        sync::sync_events::{self, DeviceLists, UnreadNotificationsCount},
         uiaa::UiaaResponse,
     },
     events::{
@@ -61,9 +59,9 @@ impl Default for TodoRoom {
 }
 
 #[allow(clippy::too_many_lines)]
-pub(crate) async fn sync_events_v4_route(
-    body: Ar<sync_events::v4::Request>,
-) -> Result<Ra<sync_events::v4::Response>, Ra<UiaaResponse>> {
+pub(crate) async fn sync_events_v5_route(
+    body: Ar<sync_events::v5::Request>,
+) -> Result<Ra<sync_events::v5::Response>, Ra<UiaaResponse>> {
     let sender_user = body.sender_user.expect("user is authenticated");
     let sender_device = body.sender_device.expect("user is authenticated");
     let body = body.body;
@@ -331,8 +329,6 @@ pub(crate) async fn sync_events_v5_route(
         }
 
         let mut list_room_ids = BTreeSet::new();
-
-        let mut ops = Vec::new();
         for (mut from, mut to) in list.ranges {
             from = from.clamp(
                 uint!(0),
@@ -346,32 +342,20 @@ pub(crate) async fn sync_events_v5_route(
                 all_joined_rooms[from.try_into().unwrap_or(usize::MAX)
                     ..=to.try_into().unwrap_or(usize::MAX)]
                     .to_vec();
-
-            list_room_ids.extend(room_ids.iter().cloned());
-            ops.push(sync_events::v4::SyncOp {
-                op: SlidingOp::Sync,
-                range: Some((from, to)),
-                index: None,
-                room_ids,
-                room_id: None,
-            });
+            list_room_ids.extend(room_ids);
         }
-
         for room_id in &list_room_ids {
             todo_rooms.entry(room_id.clone()).or_default().update(
                 list.room_details.required_state.clone(),
-                list.room_details.timeline_limit.unwrap_or(uint!(10)),
+                list.room_details.timeline_limit,
                 &known_rooms,
                 room_id,
             );
         }
-
         lists.insert(
             list_id.clone(),
-            sync_events::v4::SyncList {
-                ops,
-                count: UInt::try_from(all_joined_rooms.len())
-                    .unwrap_or(UInt::MAX),
+            sync_events::v5::response::List {
+                count: UInt::try_from(list_room_ids.len()).unwrap_or(UInt::MAX),
             },
         );
     }
@@ -382,7 +366,7 @@ pub(crate) async fn sync_events_v5_route(
         }
         todo_rooms.entry(room_id.clone()).or_default().update(
             room.required_state.clone(),
-            room.timeline_limit.unwrap_or(uint!(10)),
+            room.timeline_limit,
             &known_rooms,
             room_id,
         );
@@ -493,7 +477,7 @@ pub(crate) async fn sync_events_v5_route(
 
         rooms.insert(
             room_id.clone(),
-            sync_events::v4::SlidingSyncRoom {
+            sync_events::v5::response::Room {
                 name: services()
                     .rooms
                     .state_accessor
@@ -553,7 +537,8 @@ pub(crate) async fn sync_events_v5_route(
                 ),
                 // Count events in timeline greater than global sync counter
                 num_live: None,
-                timestamp: None,
+                // TODO
+                bump_stamp: None,
                 // TODO
                 heroes: None,
             },
@@ -576,13 +561,12 @@ pub(crate) async fn sync_events_v5_route(
         };
     }
 
-    Ok(Ra(sync_events::v4::Response {
-        initial: globalsince == 0,
+    Ok(Ra(sync_events::v5::Response {
        txn_id: body.txn_id.clone(),
        pos: next_batch.to_string(),
        lists,
        rooms,
-        extensions: sync_events::v4::Extensions {
+        extensions: sync_events::v5::response::Extensions {
            to_device: body
                .extensions
                .to_device
@@ -592,13 +576,13 @@ pub(crate) async fn sync_events_v5_route(
                    services()
                        .users
                        .get_to_device_events(&sender_user, &sender_device)
-                        .map(|events| sync_events::v4::ToDevice {
+                        .map(|events| sync_events::v5::response::ToDevice {
                            events,
                            next_batch:
next_batch.to_string(), }) }) .transpose()?, - e2ee: sync_events::v4::E2EE { + e2ee: sync_events::v5::response::E2EE { device_lists: DeviceLists { changed: device_list_changes.into_iter().collect(), left: device_list_left.into_iter().collect(), @@ -609,7 +593,7 @@ pub(crate) async fn sync_events_v4_route( // Fallback keys are not yet supported device_unused_fallback_key_types: None, }, - account_data: sync_events::v4::AccountData { + account_data: sync_events::v5::response::AccountData { global: if body.extensions.account_data.enabled.unwrap_or(false) { services() @@ -628,13 +612,12 @@ pub(crate) async fn sync_events_v4_route( }, rooms: BTreeMap::new(), }, - receipts: sync_events::v4::Receipts { + receipts: sync_events::v5::response::Receipts { rooms: BTreeMap::new(), }, - typing: sync_events::v4::Typing { + typing: sync_events::v5::response::Typing { rooms: BTreeMap::new(), }, }, - delta_token: None, })) } From 142e3158e4a488c07ede1e4a2ec304ac39b91f02 Mon Sep 17 00:00:00 2001 From: Lambda Date: Sun, 10 Aug 2025 11:20:02 +0000 Subject: [PATCH 14/33] Bump ruma with SSS patches --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 4 ++-- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 274e64f8..b29d6164 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2442,7 +2442,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.12.1" -source = "git+https://github.com/ruma/ruma.git?rev=1387667de806c37a6d7f72125117009bd618e32a#1387667de806c37a6d7f72125117009bd618e32a" +source = "git+https://gitlab.computer.surgery/matrix/ruma.git?rev=ruma-0.12.1%2Bgrapevine-1#598c22e1d99ac6ce5b3864259e63a6d5bdc00199" dependencies = [ "assign", "js_int", @@ -2461,7 +2461,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.12.1" -source = "git+https://github.com/ruma/ruma.git?rev=1387667de806c37a6d7f72125117009bd618e32a#1387667de806c37a6d7f72125117009bd618e32a" +source = "git+https://gitlab.computer.surgery/matrix/ruma.git?rev=ruma-0.12.1%2Bgrapevine-1#598c22e1d99ac6ce5b3864259e63a6d5bdc00199" dependencies = [ "js_int", "ruma-common", @@ -2473,7 +2473,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.20.1" -source = "git+https://github.com/ruma/ruma.git?rev=1387667de806c37a6d7f72125117009bd618e32a#1387667de806c37a6d7f72125117009bd618e32a" +source = "git+https://gitlab.computer.surgery/matrix/ruma.git?rev=ruma-0.12.1%2Bgrapevine-1#598c22e1d99ac6ce5b3864259e63a6d5bdc00199" dependencies = [ "as_variant", "assign", @@ -2496,7 +2496,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.15.1" -source = "git+https://github.com/ruma/ruma.git?rev=1387667de806c37a6d7f72125117009bd618e32a#1387667de806c37a6d7f72125117009bd618e32a" +source = "git+https://gitlab.computer.surgery/matrix/ruma.git?rev=ruma-0.12.1%2Bgrapevine-1#598c22e1d99ac6ce5b3864259e63a6d5bdc00199" dependencies = [ "as_variant", "base64", @@ -2527,7 +2527,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.30.1" -source = "git+https://github.com/ruma/ruma.git?rev=1387667de806c37a6d7f72125117009bd618e32a#1387667de806c37a6d7f72125117009bd618e32a" +source = "git+https://gitlab.computer.surgery/matrix/ruma.git?rev=ruma-0.12.1%2Bgrapevine-1#598c22e1d99ac6ce5b3864259e63a6d5bdc00199" dependencies = [ "as_variant", "indexmap 2.10.0", @@ -2550,7 +2550,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.11.0" -source = 
"git+https://github.com/ruma/ruma.git?rev=1387667de806c37a6d7f72125117009bd618e32a#1387667de806c37a6d7f72125117009bd618e32a" +source = "git+https://gitlab.computer.surgery/matrix/ruma.git?rev=ruma-0.12.1%2Bgrapevine-1#598c22e1d99ac6ce5b3864259e63a6d5bdc00199" dependencies = [ "bytes", "headers", @@ -2572,7 +2572,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.10.1" -source = "git+https://github.com/ruma/ruma.git?rev=1387667de806c37a6d7f72125117009bd618e32a#1387667de806c37a6d7f72125117009bd618e32a" +source = "git+https://gitlab.computer.surgery/matrix/ruma.git?rev=ruma-0.12.1%2Bgrapevine-1#598c22e1d99ac6ce5b3864259e63a6d5bdc00199" dependencies = [ "js_int", "thiserror 2.0.12", @@ -2581,7 +2581,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.15.1" -source = "git+https://github.com/ruma/ruma.git?rev=1387667de806c37a6d7f72125117009bd618e32a#1387667de806c37a6d7f72125117009bd618e32a" +source = "git+https://gitlab.computer.surgery/matrix/ruma.git?rev=ruma-0.12.1%2Bgrapevine-1#598c22e1d99ac6ce5b3864259e63a6d5bdc00199" dependencies = [ "cfg-if", "proc-macro-crate", @@ -2596,7 +2596,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.11.0" -source = "git+https://github.com/ruma/ruma.git?rev=1387667de806c37a6d7f72125117009bd618e32a#1387667de806c37a6d7f72125117009bd618e32a" +source = "git+https://gitlab.computer.surgery/matrix/ruma.git?rev=ruma-0.12.1%2Bgrapevine-1#598c22e1d99ac6ce5b3864259e63a6d5bdc00199" dependencies = [ "js_int", "ruma-common", @@ -2608,7 +2608,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.17.0" -source = "git+https://github.com/ruma/ruma.git?rev=1387667de806c37a6d7f72125117009bd618e32a#1387667de806c37a6d7f72125117009bd618e32a" +source = "git+https://gitlab.computer.surgery/matrix/ruma.git?rev=ruma-0.12.1%2Bgrapevine-1#598c22e1d99ac6ce5b3864259e63a6d5bdc00199" dependencies = [ "base64", "ed25519-dalek", @@ -2624,7 +2624,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.13.0" -source = "git+https://github.com/ruma/ruma.git?rev=1387667de806c37a6d7f72125117009bd618e32a#1387667de806c37a6d7f72125117009bd618e32a" +source = "git+https://gitlab.computer.surgery/matrix/ruma.git?rev=ruma-0.12.1%2Bgrapevine-1#598c22e1d99ac6ce5b3864259e63a6d5bdc00199" dependencies = [ "js_int", "ruma-common", diff --git a/Cargo.toml b/Cargo.toml index 87ee7593..6222d501 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -144,8 +144,8 @@ trust-dns-resolver = "0.23.2" xdg = "2.5.2" [dependencies.ruma] -git = "https://github.com/ruma/ruma.git" -rev = "1387667de806c37a6d7f72125117009bd618e32a" +git = "https://gitlab.computer.surgery/matrix/ruma.git" +rev = "ruma-0.12.1+grapevine-1" features = [ "compat-server-signing-key-version", "compat-empty-string-null", From 9405e5f16c149c2a5e55155072062955c4cc46de Mon Sep 17 00:00:00 2001 From: Lambda Date: Sat, 31 May 2025 19:12:36 +0000 Subject: [PATCH 15/33] SSS: improve tracing --- src/api/client_server/sync/msc4186.rs | 192 +++++++++++++------------- 1 file changed, 99 insertions(+), 93 deletions(-) diff --git a/src/api/client_server/sync/msc4186.rs b/src/api/client_server/sync/msc4186.rs index 01a3fe31..41785beb 100644 --- a/src/api/client_server/sync/msc4186.rs +++ b/src/api/client_server/sync/msc4186.rs @@ -19,7 +19,7 @@ use ruma::{ }, uint, JsOption, OwnedRoomId, RoomId, UInt, UserId, }; -use tracing::{debug, error}; +use tracing::{debug, error, field, trace, warn}; use super::{load_timeline, share_encrypted_room}; use crate::{ @@ -27,6 +27,7 @@ 
use crate::{ services, Ar, Error, Ra, Result, }; +#[derive(Debug)] struct TodoRoom { required_state_request: BTreeSet<(StateEventType, String)>, timeline_limit: u64, @@ -59,9 +60,16 @@ impl Default for TodoRoom { } #[allow(clippy::too_many_lines)] +#[tracing::instrument(skip_all, fields( + pos, + next_batch, + connection_id = ?body.conn_id, +))] pub(crate) async fn sync_events_v5_route( body: Ar, ) -> Result, Ra> { + let current_span = tracing::Span::current(); + let sender_user = body.sender_user.expect("user is authenticated"); let sender_device = body.sender_device.expect("user is authenticated"); let body = body.body; @@ -69,7 +77,9 @@ pub(crate) async fn sync_events_v5_route( let watcher = services().globals.watch(&sender_user, &sender_device); let next_batch = services().globals.next_count()?; + current_span.record("next_batch", field::display(&next_batch)); + current_span.record("pos", field::debug(&body.pos)); let globalsince = body.pos.as_ref().and_then(|string| string.parse().ok()).unwrap_or(0); @@ -324,6 +334,8 @@ pub(crate) async fn sync_events_v5_route( let mut todo_rooms: BTreeMap = BTreeMap::new(); for (list_id, list) in body.lists { + trace!(list_id, ?list, "Collecting rooms in list"); + if list.filters.and_then(|f| f.is_invite).unwrap_or(false) { continue; } @@ -352,16 +364,20 @@ pub(crate) async fn sync_events_v5_route( room_id, ); } + let num_rooms = list_room_ids.len(); + trace!(list_id, num_rooms, "Done collecting rooms"); + lists.insert( list_id.clone(), sync_events::v5::response::List { - count: UInt::try_from(list_room_ids.len()).unwrap_or(UInt::MAX), + count: UInt::try_from(num_rooms).unwrap_or(UInt::MAX), }, ); } for (room_id, room) in &body.room_subscriptions { if !services().rooms.metadata.exists(room_id)? { + warn!(room_id = room_id.as_str(), "Subscribed room does not exist"); continue; } todo_rooms.entry(room_id.clone()).or_default().update( @@ -379,25 +395,23 @@ pub(crate) async fn sync_events_v5_route( ); let mut rooms = BTreeMap::new(); - for ( - room_id, - TodoRoom { - required_state_request, - timeline_limit, - roomsince, - }, - ) in &todo_rooms - { - let roomsincecount = PduCount::Normal(*roomsince); + for (room_id, todo_room) in &todo_rooms { + trace!( + room_id = room_id.as_str(), + ?todo_room, + "Processing matched room" + ); + let roomsincecount = PduCount::Normal(todo_room.roomsince); let (timeline_pdus, limited) = load_timeline( &sender_user, room_id, roomsincecount, - *timeline_limit, + todo_room.timeline_limit, )?; - if roomsince != &0 && timeline_pdus.is_empty() { + if todo_room.roomsince != 0 && timeline_pdus.is_empty() { + trace!("No new timeline events, skipping"); continue; } @@ -410,14 +424,18 @@ pub(crate) async fn sync_events_v5_route( } PduCount::Normal(c) => c.to_string(), }) - .or_else(|| (roomsince != &0).then(|| roomsince.to_string())); + .or_else(|| { + (todo_room.roomsince != 0) + .then(|| todo_room.roomsince.to_string()) + }); let room_events: Vec<_> = timeline_pdus .iter() .map(|(_, pdu)| pdu.to_sync_room_event()) .collect(); - let required_state = required_state_request + let required_state = todo_room + .required_state_request .iter() .filter_map(|state| { services() @@ -457,92 +475,80 @@ pub(crate) async fn sync_events_v5_route( .collect::>(); let name = match &*heroes { [] => None, - [only] => Some(only.0.clone()), - [firsts @ .., last] => Some({ + [(only, _)] => Some(only.clone()), + [firsts @ .., (last, _)] => Some({ let firsts = firsts .iter() - .map(|h| h.0.clone()) + .map(|(name, _)| name.clone()) .collect::>() .join(", 
"); - format!("{firsts} and {}", last.0) + format!("{firsts} and {last}") }), }; - let avatar = if let [only] = &*heroes { - only.1.clone() - } else { - None - }; - - rooms.insert( - room_id.clone(), - sync_events::v5::response::Room { - name: services() - .rooms - .state_accessor - .get_name(room_id)? - .or(name), - avatar: if let Some(avatar) = avatar { - JsOption::Some(avatar) - } else { - match services().rooms.state_accessor.get_avatar(room_id)? { - JsOption::Some(avatar) => { - JsOption::from_option(avatar.url) - } - JsOption::Null => JsOption::Null, - JsOption::Undefined => JsOption::Undefined, - } - }, - initial: Some(roomsince == &0), - is_dm: None, - invite_state: None, - unread_notifications: UnreadNotificationsCount { - highlight_count: Some( - services() - .rooms - .user - .highlight_count(&sender_user, room_id)? - .try_into() - .expect("notification count can't go that high"), - ), - notification_count: Some( - services() - .rooms - .user - .notification_count(&sender_user, room_id)? - .try_into() - .expect("notification count can't go that high"), - ), - }, - timeline: room_events, - required_state, - prev_batch, - limited, - joined_count: Some( - services() - .rooms - .state_cache - .room_joined_count(room_id)? - .map(UInt::new_saturating) - .unwrap_or(uint!(0)), - ), - invited_count: Some( - services() - .rooms - .state_cache - .room_invited_count(room_id)? - .map(UInt::new_saturating) - .unwrap_or(uint!(0)), - ), - // Count events in timeline greater than global sync counter - num_live: None, - // TODO - bump_stamp: None, - // TODO - heroes: None, + let room = sync_events::v5::response::Room { + name: services().rooms.state_accessor.get_name(room_id)?.or(name), + avatar: if let [(_name, Some(avatar))] = &*heroes { + JsOption::Some(avatar.clone()) + } else { + match services().rooms.state_accessor.get_avatar(room_id)? { + JsOption::Some(avatar) => JsOption::from_option(avatar.url), + JsOption::Null => JsOption::Null, + JsOption::Undefined => JsOption::Undefined, + } }, - ); + initial: Some(todo_room.roomsince == 0), + is_dm: None, + invite_state: None, + unread_notifications: UnreadNotificationsCount { + highlight_count: Some( + services() + .rooms + .user + .highlight_count(&sender_user, room_id)? + .try_into() + .expect("notification count can't go that high"), + ), + notification_count: Some( + services() + .rooms + .user + .notification_count(&sender_user, room_id)? + .try_into() + .expect("notification count can't go that high"), + ), + }, + timeline: room_events, + required_state, + prev_batch, + limited, + joined_count: Some( + services() + .rooms + .state_cache + .room_joined_count(room_id)? + .map(UInt::new_saturating) + .unwrap_or(uint!(0)), + ), + invited_count: Some( + services() + .rooms + .state_cache + .room_invited_count(room_id)? 
+ .map(UInt::new_saturating) + .unwrap_or(uint!(0)), + ), + // Count events in timeline greater than global sync counter + num_live: None, + // TODO + bump_stamp: None, + // TODO + heroes: None, + }; + trace!(room_id = room_id.as_str(), ?room, "Built room data"); + + rooms.insert(room_id.clone(), room); } if rooms From 7558d3456b977111f4d74701d556bfa7265c30ad Mon Sep 17 00:00:00 2001 From: Lambda Date: Thu, 27 Mar 2025 20:02:16 +0000 Subject: [PATCH 16/33] SSS: split up sync_events_v5_route() --- src/api/client_server/sync/msc4186.rs | 861 +++++++++++++------------- 1 file changed, 446 insertions(+), 415 deletions(-) diff --git a/src/api/client_server/sync/msc4186.rs b/src/api/client_server/sync/msc4186.rs index 41785beb..f4e579bf 100644 --- a/src/api/client_server/sync/msc4186.rs +++ b/src/api/client_server/sync/msc4186.rs @@ -111,269 +111,30 @@ pub(crate) async fn sync_events_v5_route( )?; } - // Users that have left any encrypted rooms the sender was in - let mut left_encrypted_users = HashSet::new(); - let mut device_list_changes = HashSet::new(); - let mut device_list_left = HashSet::new(); + #[allow(clippy::if_then_some_else_none)] + let device_lists = if body.extensions.e2ee.enabled.unwrap_or(false) { + Some(get_e2ee_data(&sender_user, globalsince, &all_joined_rooms).await?) + } else { + None + }; - if body.extensions.e2ee.enabled.unwrap_or(false) { - // Look for device list updates of this account - device_list_changes.extend( - services() - .users - .keys_changed(sender_user.as_ref(), globalsince, None) - .filter_map(Result::ok), - ); - - for room_id in &all_joined_rooms { - let Some(current_shortstatehash) = - services().rooms.state.get_room_shortstatehash(room_id)? - else { - error!(%room_id, "Room has no state"); - continue; - }; - - let since_shortstatehash = services() - .rooms - .user - .get_token_shortstatehash(room_id, globalsince)?; - - let since_sender_member: Option = - since_shortstatehash - .and_then(|shortstatehash| { - services() - .rooms - .state_accessor - .state_get( - shortstatehash, - &StateEventType::RoomMember, - sender_user.as_str(), - ) - .transpose() - }) - .transpose()? - .and_then(|pdu| { - serde_json::from_str(pdu.content.get()) - .map_err(|_| { - Error::bad_database("Invalid PDU in database.") - }) - .ok() - }); - - let encrypted_room = services() - .rooms - .state_accessor - .state_get( - current_shortstatehash, - &StateEventType::RoomEncryption, - "", - )? - .is_some(); - - if let Some(since_shortstatehash) = since_shortstatehash { - // Skip if there are only timeline changes - if since_shortstatehash == current_shortstatehash { - continue; - } - - let since_encryption = - services().rooms.state_accessor.state_get( - since_shortstatehash, - &StateEventType::RoomEncryption, - "", - )?; - - let joined_since_last_sync = - since_sender_member.is_none_or(|member| { - member.membership != MembershipState::Join - }); - - let new_encrypted_room = - encrypted_room && since_encryption.is_none(); - if encrypted_room { - let current_state_ids = services() - .rooms - .state_accessor - .state_full_ids(current_shortstatehash) - .await?; - let since_state_ids = services() - .rooms - .state_accessor - .state_full_ids(since_shortstatehash) - .await?; - - for (key, event_id) in current_state_ids { - if since_state_ids.get(&key) != Some(&event_id) { - let Some(pdu) = - services().rooms.timeline.get_pdu(&event_id)? 
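// `share_encrypted_room()` (imported from `super`) is the guard this block
// keeps calling; its body is not part of this series. Below is a plausible,
// dependency-free approximation only, assuming (as the call sites here
// suggest) that the third argument names a room to ignore. Rooms are
// modelled as plain sets instead of database lookups; this is a sketch,
// not grapevine's actual helper.

use std::collections::BTreeSet;

struct Room {
    id: &'static str,
    encrypted: bool,
    members: BTreeSet<&'static str>,
}

fn share_encrypted_room(
    rooms: &[Room],
    sender: &str,
    other: &str,
    ignore: &str,
) -> bool {
    rooms.iter().any(|room| {
        room.id != ignore
            && room.encrypted
            && room.members.contains(sender)
            && room.members.contains(other)
    })
}

fn main() {
    let rooms = [Room {
        id: "!dm:example.org",
        encrypted: true,
        members: BTreeSet::from(["@me:example.org", "@new:example.org"]),
    }];
    // The only shared encrypted room is the one being processed, so the
    // sender still needs @new's device keys.
    assert!(!share_encrypted_room(
        &rooms,
        "@me:example.org",
        "@new:example.org",
        "!dm:example.org"
    ));
}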
- else { - error!(%event_id, "Event in state not found"); - continue; - }; - if pdu.kind == TimelineEventType::RoomMember { - if let Some(state_key) = &pdu.state_key { - let user_id = - UserId::parse(state_key.clone()) - .map_err(|_| { - Error::bad_database( - "Invalid UserId in member \ - PDU.", - ) - })?; - - if user_id == sender_user { - continue; - } - - let new_membership = - serde_json::from_str::< - RoomMemberEventContent, - >( - pdu.content.get() - ) - .map_err(|_| { - Error::bad_database( - "Invalid PDU in database.", - ) - })? - .membership; - - match new_membership { - MembershipState::Join => { - // A new user joined an encrypted - // room - if !share_encrypted_room( - &sender_user, - &user_id, - room_id, - )? { - device_list_changes - .insert(user_id); - } - } - MembershipState::Leave => { - // Write down users that have left - // encrypted rooms we are in - left_encrypted_users - .insert(user_id); - } - _ => {} - } - } - } - } - } - if joined_since_last_sync || new_encrypted_room { - // If the user is in a new encrypted room, give them all - // joined users - device_list_changes.extend( - services() - .rooms - .state_cache - .room_members(room_id) - .flatten() - .filter(|user_id| { - // Don't send key updates from the sender to - // the sender - &sender_user != user_id - }) - .filter(|user_id| { - // Only send keys if the sender doesn't - // share an encrypted room with the target - // already - !share_encrypted_room( - &sender_user, - user_id, - room_id, - ) - .unwrap_or(false) - }), - ); - } - } - } - // Look for device list updates in this room - device_list_changes.extend( - services() - .users - .keys_changed(room_id.as_ref(), globalsince, None) - .filter_map(Result::ok), - ); - } - for user_id in left_encrypted_users { - let dont_share_encrypted_room = services() - .rooms - .user - .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])? - .filter_map(Result::ok) - .filter_map(|other_room_id| { - Some( - services() - .rooms - .state_accessor - .room_state_get( - &other_room_id, - &StateEventType::RoomEncryption, - "", - ) - .ok()? 
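// The chain removed around this point (completed just below, and re-added
// inside get_e2ee_data() later in this patch) reduces to one rule: a user
// lands in `device_list_left` only when every room still shared with them
// is unencrypted. A minimal model of that `.all(|encrypted| !encrypted)`
// predicate, including the vacuous case:

fn main() {
    // encryption flags of the rooms still shared with one user
    assert!([false, false].iter().all(|&encrypted| !encrypted)); // -> left
    assert!(![false, true].iter().all(|&encrypted| !encrypted)); // kept
    // no shared rooms at all: `all()` is vacuously true, also -> left
    let none: [bool; 0] = [];
    assert!(none.iter().all(|&encrypted| !encrypted));
}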
- .is_some(), - ) - }) - .all(|encrypted| !encrypted); - // If the user doesn't share an encrypted room with the target - // anymore, we need to tell them - if dont_share_encrypted_room { - device_list_left.insert(user_id); - } - } - } - - let mut lists = BTreeMap::new(); // and required state let mut todo_rooms: BTreeMap = BTreeMap::new(); - for (list_id, list) in body.lists { - trace!(list_id, ?list, "Collecting rooms in list"); - - if list.filters.and_then(|f| f.is_invite).unwrap_or(false) { - continue; - } - - let mut list_room_ids = BTreeSet::new(); - for (mut from, mut to) in list.ranges { - from = from.clamp( - uint!(0), - UInt::try_from(all_joined_rooms.len() - 1).unwrap_or(UInt::MAX), - ); - to = to.clamp( - from, - UInt::try_from(all_joined_rooms.len() - 1).unwrap_or(UInt::MAX), - ); - let room_ids = - all_joined_rooms[from.try_into().unwrap_or(usize::MAX) - ..=to.try_into().unwrap_or(usize::MAX)] - .to_vec(); - list_room_ids.extend(room_ids); - } - for room_id in &list_room_ids { - todo_rooms.entry(room_id.clone()).or_default().update( - list.room_details.required_state.clone(), - list.room_details.timeline_limit, + let lists = body + .lists + .into_iter() + .filter_map(|(list_id, list)| { + let rooms = rooms_in_list( + &list_id, + list, + &all_joined_rooms, &known_rooms, - room_id, - ); - } - let num_rooms = list_room_ids.len(); - trace!(list_id, num_rooms, "Done collecting rooms"); - - lists.insert( - list_id.clone(), - sync_events::v5::response::List { - count: UInt::try_from(num_rooms).unwrap_or(UInt::MAX), - }, - ); - } + &mut todo_rooms, + )?; + Some((list_id, rooms)) + }) + .collect(); for (room_id, room) in &body.room_subscriptions { if !services().rooms.metadata.exists(room_id)? { @@ -395,160 +156,10 @@ pub(crate) async fn sync_events_v5_route( ); let mut rooms = BTreeMap::new(); - for (room_id, todo_room) in &todo_rooms { - trace!( - room_id = room_id.as_str(), - ?todo_room, - "Processing matched room" - ); - let roomsincecount = PduCount::Normal(todo_room.roomsince); - - let (timeline_pdus, limited) = load_timeline( - &sender_user, - room_id, - roomsincecount, - todo_room.timeline_limit, - )?; - - if todo_room.roomsince != 0 && timeline_pdus.is_empty() { - trace!("No new timeline events, skipping"); - continue; + for (room_id, todo_room) in todo_rooms { + if let Some(room) = process_room(&sender_user, &room_id, &todo_room)? 
{ + rooms.insert(room_id.clone(), room); } - - let prev_batch = timeline_pdus - .first() - .map(|(pdu_count, _)| match pdu_count { - PduCount::Backfilled(_) => { - error!("Timeline in backfill state?!"); - "0".to_owned() - } - PduCount::Normal(c) => c.to_string(), - }) - .or_else(|| { - (todo_room.roomsince != 0) - .then(|| todo_room.roomsince.to_string()) - }); - - let room_events: Vec<_> = timeline_pdus - .iter() - .map(|(_, pdu)| pdu.to_sync_room_event()) - .collect(); - - let required_state = todo_room - .required_state_request - .iter() - .filter_map(|state| { - services() - .rooms - .state_accessor - .room_state_get(room_id, &state.0, &state.1) - .ok() - .flatten() - .map(|state| state.to_sync_state_event()) - }) - .collect(); - - // Heroes - let heroes = services() - .rooms - .state_cache - .room_members(room_id) - .filter_map(Result::ok) - .filter(|member| member != &sender_user) - .filter_map(|member| { - services() - .rooms - .state_accessor - .get_member(room_id, &member) - .ok() - .flatten() - .map(|memberevent| { - ( - memberevent - .displayname - .unwrap_or_else(|| member.to_string()), - memberevent.avatar_url, - ) - }) - }) - .take(5) - .collect::>(); - let name = match &*heroes { - [] => None, - [(only, _)] => Some(only.clone()), - [firsts @ .., (last, _)] => Some({ - let firsts = firsts - .iter() - .map(|(name, _)| name.clone()) - .collect::>() - .join(", "); - - format!("{firsts} and {last}") - }), - }; - - let room = sync_events::v5::response::Room { - name: services().rooms.state_accessor.get_name(room_id)?.or(name), - avatar: if let [(_name, Some(avatar))] = &*heroes { - JsOption::Some(avatar.clone()) - } else { - match services().rooms.state_accessor.get_avatar(room_id)? { - JsOption::Some(avatar) => JsOption::from_option(avatar.url), - JsOption::Null => JsOption::Null, - JsOption::Undefined => JsOption::Undefined, - } - }, - initial: Some(todo_room.roomsince == 0), - is_dm: None, - invite_state: None, - unread_notifications: UnreadNotificationsCount { - highlight_count: Some( - services() - .rooms - .user - .highlight_count(&sender_user, room_id)? - .try_into() - .expect("notification count can't go that high"), - ), - notification_count: Some( - services() - .rooms - .user - .notification_count(&sender_user, room_id)? - .try_into() - .expect("notification count can't go that high"), - ), - }, - timeline: room_events, - required_state, - prev_batch, - limited, - joined_count: Some( - services() - .rooms - .state_cache - .room_joined_count(room_id)? - .map(UInt::new_saturating) - .unwrap_or(uint!(0)), - ), - invited_count: Some( - services() - .rooms - .state_cache - .room_invited_count(room_id)? 
- .map(UInt::new_saturating) - .unwrap_or(uint!(0)), - ), - // Count events in timeline greater than global sync counter - num_live: None, - // TODO - bump_stamp: None, - // TODO - heroes: None, - }; - trace!(room_id = room_id.as_str(), ?room, "Built room data"); - - rooms.insert(room_id.clone(), room); } if rooms @@ -589,10 +200,7 @@ pub(crate) async fn sync_events_v5_route( }) .transpose()?, e2ee: sync_events::v5::response::E2EE { - device_lists: DeviceLists { - changed: device_list_changes.into_iter().collect(), - left: device_list_left.into_iter().collect(), - }, + device_lists: device_lists.unwrap_or_default(), device_one_time_keys_count: services() .users .count_one_time_keys(&sender_user, &sender_device)?, @@ -627,3 +235,426 @@ pub(crate) async fn sync_events_v5_route( }, })) } + +#[allow(clippy::too_many_lines)] +#[tracing::instrument(skip_all)] +async fn get_e2ee_data( + sender_user: &UserId, + globalsince: u64, + all_joined_rooms: &[OwnedRoomId], +) -> Result { + // Users that have left any encrypted rooms the sender was in + let mut left_encrypted_users = HashSet::new(); + + // Look for device list updates of this account + let mut device_list_changes: HashSet<_> = services() + .users + .keys_changed(sender_user.as_ref(), globalsince, None) + .filter_map(Result::ok) + .collect(); + + for room_id in all_joined_rooms { + let Some(current_shortstatehash) = + services().rooms.state.get_room_shortstatehash(room_id)? + else { + error!(%room_id, "Room has no state"); + continue; + }; + + let since_shortstatehash = services() + .rooms + .user + .get_token_shortstatehash(room_id, globalsince)?; + + let since_sender_member: Option = + since_shortstatehash + .and_then(|shortstatehash| { + services() + .rooms + .state_accessor + .state_get( + shortstatehash, + &StateEventType::RoomMember, + sender_user.as_str(), + ) + .transpose() + }) + .transpose()? + .and_then(|pdu| { + serde_json::from_str(pdu.content.get()) + .map_err(|_| { + Error::bad_database("Invalid PDU in database.") + }) + .ok() + }); + + let encrypted_room = services() + .rooms + .state_accessor + .state_get( + current_shortstatehash, + &StateEventType::RoomEncryption, + "", + )? + .is_some(); + + if let Some(since_shortstatehash) = since_shortstatehash { + // Skip if there are only timeline changes + if since_shortstatehash == current_shortstatehash { + continue; + } + + let since_encryption = services().rooms.state_accessor.state_get( + since_shortstatehash, + &StateEventType::RoomEncryption, + "", + )?; + + let joined_since_last_sync = + since_sender_member.is_none_or(|member| { + member.membership != MembershipState::Join + }); + + let new_encrypted_room = + encrypted_room && since_encryption.is_none(); + if encrypted_room { + let current_state_ids = services() + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .await?; + let since_state_ids = services() + .rooms + .state_accessor + .state_full_ids(since_shortstatehash) + .await?; + + for (key, event_id) in current_state_ids { + if since_state_ids.get(&key) != Some(&event_id) { + let Some(pdu) = + services().rooms.timeline.get_pdu(&event_id)? 
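// The `for (key, event_id) in current_state_ids` loop above is a map diff:
// it visits every entry of the *current* full state and reacts to those
// that differ from the `since` snapshot. The same comparison, isolated
// from the database (bare u32s stand in for short state keys / event ids):

use std::collections::BTreeMap;

fn changed_keys(
    current: &BTreeMap<u32, u32>,
    since: &BTreeMap<u32, u32>,
) -> Vec<u32> {
    current
        .iter()
        // mirrors `since_state_ids.get(&key) != Some(&event_id)`
        .filter(|&(key, event_id)| since.get(key) != Some(event_id))
        .map(|(key, _)| *key)
        .collect()
}

fn main() {
    let since = BTreeMap::from([(1, 10), (2, 20)]);
    let current = BTreeMap::from([(1, 10), (2, 21), (3, 30)]);
    // key 2 changed and key 3 is new; entries deleted since the snapshot
    // are never visited, because only `current` is iterated
    assert_eq!(changed_keys(&current, &since), [2, 3]);
}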
+ else { + error!(%event_id, "Event in state not found"); + continue; + }; + if pdu.kind == TimelineEventType::RoomMember { + if let Some(state_key) = &pdu.state_key { + let user_id = UserId::parse(state_key.clone()) + .map_err(|_| { + Error::bad_database( + "Invalid UserId in member PDU.", + ) + })?; + + if user_id == sender_user { + continue; + } + + let new_membership = serde_json::from_str::< + RoomMemberEventContent, + >( + pdu.content.get() + ) + .map_err(|_| { + Error::bad_database( + "Invalid PDU in database.", + ) + })? + .membership; + + match new_membership { + MembershipState::Join => { + // A new user joined an encrypted + // room + if !share_encrypted_room( + sender_user, + &user_id, + room_id, + )? { + device_list_changes.insert(user_id); + } + } + MembershipState::Leave => { + // Write down users that have left + // encrypted rooms we are in + left_encrypted_users.insert(user_id); + } + _ => {} + } + } + } + } + } + if joined_since_last_sync || new_encrypted_room { + // If the user is in a new encrypted room, give them all + // joined users + device_list_changes.extend( + services() + .rooms + .state_cache + .room_members(room_id) + .flatten() + .filter(|user_id| { + // Don't send key updates from the sender to + // the sender + sender_user != user_id + }) + .filter(|user_id| { + // Only send keys if the sender doesn't + // share an encrypted room with the target + // already + !share_encrypted_room( + sender_user, + user_id, + room_id, + ) + .unwrap_or(false) + }), + ); + } + } + } + // Look for device list updates in this room + device_list_changes.extend( + services() + .users + .keys_changed(room_id.as_ref(), globalsince, None) + .filter_map(Result::ok), + ); + } + + let mut device_list_left = HashSet::new(); + for user_id in left_encrypted_users { + let dont_share_encrypted_room = services() + .rooms + .user + .get_shared_rooms(vec![sender_user.to_owned(), user_id.clone()])? + .filter_map(Result::ok) + .filter_map(|other_room_id| { + Some( + services() + .rooms + .state_accessor + .room_state_get( + &other_room_id, + &StateEventType::RoomEncryption, + "", + ) + .ok()? 
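// When the sender newly joins an encrypted room (or encryption is newly
// enabled), the `device_list_changes.extend(..)` above fans out every
// member except two groups: the sender themself, and users whose keys are
// already tracked through another shared encrypted room. The same two
// filters over plain strings:

fn main() {
    let sender = "@me:example.org";
    let members = ["@me:example.org", "@old:example.org", "@new:example.org"];
    // stand-in for the `share_encrypted_room()` check
    let already_shares = |user: &str| user == "@old:example.org";

    let changes: Vec<_> = members
        .into_iter()
        .filter(|&user| user != sender)        // no updates about oneself
        .filter(|&user| !already_shares(user)) // no duplicate key updates
        .collect();
    assert_eq!(changes, ["@new:example.org"]);
}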
+ .is_some(), + ) + }) + .all(|encrypted| !encrypted); + // If the user doesn't share an encrypted room with the target + // anymore, we need to tell them + if dont_share_encrypted_room { + device_list_left.insert(user_id); + } + } + + Ok(DeviceLists { + changed: device_list_changes.into_iter().collect(), + left: device_list_left.into_iter().collect(), + }) +} + +#[tracing::instrument( + skip_all, + fields(list_id = list_id, ?list), +)] +fn rooms_in_list( + list_id: &str, + list: sync_events::v5::request::List, + all_joined_rooms: &[OwnedRoomId], + known_rooms: &BTreeMap, + todo_rooms: &mut BTreeMap, +) -> Option { + trace!(list_id, ?list, "Collecting rooms in list"); + + if list.filters.and_then(|f| f.is_invite).unwrap_or(false) { + return None; + } + + let mut list_room_ids: BTreeSet = BTreeSet::new(); + for (mut from, mut to) in list.ranges { + from = from.clamp( + uint!(0), + UInt::try_from(all_joined_rooms.len() - 1).unwrap_or(UInt::MAX), + ); + to = to.clamp( + from, + UInt::try_from(all_joined_rooms.len() - 1).unwrap_or(UInt::MAX), + ); + let room_ids = all_joined_rooms[from.try_into().unwrap_or(usize::MAX) + ..=to.try_into().unwrap_or(usize::MAX)] + .to_vec(); + list_room_ids.extend(room_ids); + } + for room_id in &list_room_ids { + todo_rooms.entry(room_id.clone()).or_default().update( + list.room_details.required_state.clone(), + list.room_details.timeline_limit, + known_rooms, + room_id, + ); + } + let num_rooms = list_room_ids.len(); + trace!(list_id, num_rooms, "Done collecting rooms"); + + Some(sync_events::v5::response::List { + count: UInt::try_from(num_rooms).unwrap_or(UInt::MAX), + }) +} + +#[allow(clippy::too_many_lines)] +#[tracing::instrument(skip(sender_user))] +fn process_room( + sender_user: &UserId, + room_id: &RoomId, + todo_room: &TodoRoom, +) -> Result> { + let roomsincecount = PduCount::Normal(todo_room.roomsince); + + let (timeline_pdus, limited) = load_timeline( + sender_user, + room_id, + roomsincecount, + todo_room.timeline_limit, + )?; + + if todo_room.roomsince != 0 && timeline_pdus.is_empty() { + return Ok(None); + } + + let prev_batch = timeline_pdus + .first() + .map(|(pdu_count, _)| match pdu_count { + PduCount::Backfilled(_) => { + error!("Timeline in backfill state?!"); + "0".to_owned() + } + PduCount::Normal(c) => c.to_string(), + }) + .or_else(|| { + (todo_room.roomsince != 0).then(|| todo_room.roomsince.to_string()) + }); + + let room_events: Vec<_> = + timeline_pdus.iter().map(|(_, pdu)| pdu.to_sync_room_event()).collect(); + + let required_state = todo_room + .required_state_request + .iter() + .filter_map(|state| { + services() + .rooms + .state_accessor + .room_state_get(room_id, &state.0, &state.1) + .ok() + .flatten() + .map(|state| state.to_sync_state_event()) + }) + .collect(); + + // Heroes + let heroes = services() + .rooms + .state_cache + .room_members(room_id) + .filter_map(Result::ok) + .filter(|member| member != sender_user) + .filter_map(|member| { + services() + .rooms + .state_accessor + .get_member(room_id, &member) + .ok() + .flatten() + .map(|memberevent| { + ( + memberevent + .displayname + .unwrap_or_else(|| member.to_string()), + memberevent.avatar_url, + ) + }) + }) + .take(5) + .collect::>(); + let name = match &*heroes { + [] => None, + [(only, _)] => Some(only.clone()), + [firsts @ .., (last, _)] => Some({ + let firsts = firsts + .iter() + .map(|(name, _)| name.clone()) + .collect::>() + .join(", "); + + format!("{firsts} and {last}") + }), + }; + + let room = sync_events::v5::response::Room { + name: 
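// The `let name = match &*heroes` arms just above condense up to five
// member "heroes" into a display name. The same rule as a standalone,
// dependency-free function; plain `String`s stand in for the
// display-name/avatar pairs pulled from the member events:

fn hero_room_name(heroes: &[(String, Option<String>)]) -> Option<String> {
    match heroes {
        [] => None,
        [(only, _)] => Some(only.clone()),
        [firsts @ .., (last, _)] => Some({
            let firsts = firsts
                .iter()
                .map(|(name, _)| name.clone())
                .collect::<Vec<_>>()
                .join(", ");
            format!("{firsts} and {last}")
        }),
    }
}

fn main() {
    let heroes: Vec<(String, Option<String>)> =
        ["Alice", "Bob", "Carol"].map(|n| (n.to_owned(), None)).into();
    assert_eq!(
        hero_room_name(&heroes).as_deref(),
        Some("Alice, Bob and Carol")
    );
    assert_eq!(hero_room_name(&heroes[..1]).as_deref(), Some("Alice"));
    assert_eq!(hero_room_name(&[]), None);
}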
services().rooms.state_accessor.get_name(room_id)?.or(name), + avatar: if let [(_name, Some(avatar))] = &*heroes { + JsOption::Some(avatar.clone()) + } else { + match services().rooms.state_accessor.get_avatar(room_id)? { + JsOption::Some(avatar) => JsOption::from_option(avatar.url), + JsOption::Null => JsOption::Null, + JsOption::Undefined => JsOption::Undefined, + } + }, + initial: Some(todo_room.roomsince == 0), + is_dm: None, + invite_state: None, + unread_notifications: UnreadNotificationsCount { + highlight_count: Some( + services() + .rooms + .user + .highlight_count(sender_user, room_id)? + .try_into() + .expect("notification count can't go that high"), + ), + notification_count: Some( + services() + .rooms + .user + .notification_count(sender_user, room_id)? + .try_into() + .expect("notification count can't go that high"), + ), + }, + timeline: room_events, + required_state, + prev_batch, + limited, + joined_count: Some( + services() + .rooms + .state_cache + .room_joined_count(room_id)? + .map(UInt::new_saturating) + .unwrap_or(uint!(0)), + ), + invited_count: Some( + services() + .rooms + .state_cache + .room_invited_count(room_id)? + .map(UInt::new_saturating) + .unwrap_or(uint!(0)), + ), + // Count events in timeline greater than global sync counter + num_live: None, + // TODO + bump_stamp: None, + // TODO + heroes: None, + }; + trace!(?room, "Built room data"); + + Ok(Some(room)) +} From ede95dcee59c06674bffc1a91a1a0635d63692d6 Mon Sep 17 00:00:00 2001 From: Lambda Date: Thu, 27 Mar 2025 18:33:30 +0000 Subject: [PATCH 17/33] SSS: implement list filters --- src/api/client_server/sync/msc4186.rs | 315 ++++++++++++++++++++------ 1 file changed, 247 insertions(+), 68 deletions(-) diff --git a/src/api/client_server/sync/msc4186.rs b/src/api/client_server/sync/msc4186.rs index f4e579bf..a50dda46 100644 --- a/src/api/client_server/sync/msc4186.rs +++ b/src/api/client_server/sync/msc4186.rs @@ -10,20 +10,38 @@ use std::{ use ruma::{ api::client::{ - sync::sync_events::{self, DeviceLists, UnreadNotificationsCount}, + sync::sync_events::{ + self, v5::request::ListFilters, DeviceLists, + UnreadNotificationsCount, + }, uiaa::UiaaResponse, }, events::{ - room::member::{MembershipState, RoomMemberEventContent}, - StateEventType, TimelineEventType, + direct::DirectEventContent, + room::{ + create::RoomCreateEventContent, + encryption::PossiblyRedactedRoomEncryptionEventContent, + member::{MembershipState, RoomMemberEventContent}, + }, + AnyStrippedStateEvent, PossiblyRedactedStateEventContent, + StateEventType, StrippedStateEvent, TimelineEventType, }, + room::RoomType, + serde::Raw, uint, JsOption, OwnedRoomId, RoomId, UInt, UserId, }; +use serde::de::DeserializeOwned; use tracing::{debug, error, field, trace, warn}; use super::{load_timeline, share_encrypted_room}; use crate::{ - service::{account_data, rooms::timeline::PduCount, users::ConnectionKey}, + service::{ + account_data, + rooms::{ + short::ShortStateHash, state::ExtractType, timeline::PduCount, + }, + users::ConnectionKey, + }, services, Ar, Error, Ra, Result, }; @@ -59,6 +77,163 @@ impl Default for TodoRoom { } } +fn is_dm_room(user: &UserId, room: &RoomId) -> Result { + let Some(event) = + services().account_data.get_global::(user)? 
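// `m.direct` is global account data whose content maps each direct-chat
// partner to the rooms shared with them (ruma's `DirectEventContent` is
// essentially this map); a room counts as a DM if it appears under any
// partner. The `.values().flatten().any(..)` test, modelled without
// ruma's typed ids:

use std::collections::BTreeMap;

fn main() {
    let direct = BTreeMap::from([
        ("@alice:example.org", vec!["!a:example.org"]),
        ("@bob:example.org", vec!["!b:example.org", "!c:example.org"]),
    ]);
    let is_dm = |room: &str| direct.values().flatten().any(|&r| r == room);
    assert!(is_dm("!c:example.org"));
    assert!(!is_dm("!other:example.org"));
}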
+ else { + return Ok(false); + }; + + let event = event + .deserialize() + .map_err(|_| Error::bad_database("Invalid m.direct event"))?; + + Ok(event.values().flatten().any(|r| r == room)) +} + +fn is_encrypted_room(current_shortstatehash: ShortStateHash) -> Result { + Ok(services() + .rooms + .state_accessor + .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? + .is_some()) +} + +fn get_invite_state( + invite_state: &[Raw], +) -> Option> +where + T: PossiblyRedactedStateEventContent + DeserializeOwned, +{ + invite_state + .iter() + .find_map(|ev| ev.deserialize_as::>().ok()) +} + +#[derive(Debug)] +struct RoomData { + id: OwnedRoomId, + current_shortstatehash: ShortStateHash, + is_dm: bool, + is_encrypted: bool, + is_invite: bool, + room_type: Option, +} + +impl RoomData { + #[tracing::instrument] + fn new( + id: OwnedRoomId, + user: &UserId, + invite_state: Option<&[Raw]>, + ) -> Result { + let current_shortstatehash = services() + .rooms + .state + .get_room_shortstatehash(&id)? + .ok_or_else(|| Error::bad_database("Room has no state"))?; + + let room_type = if let Some(invite_state) = &invite_state { + get_invite_state::(invite_state) + .and_then(|e| e.content.room_type) + } else { + services().rooms.state.get_create_content::(&id)? + }; + + let is_dm = match is_dm_room(user, &id) { + Ok(x) => x, + Err(error) => { + error!(%error, %user, "Invalid m.direct account data event"); + false + } + }; + let is_encrypted = if let Some(invite_state) = &invite_state { + get_invite_state::( + invite_state, + ) + .is_some() + } else { + is_encrypted_room(current_shortstatehash)? + }; + let is_invite = invite_state.is_some(); + + Ok(Self { + id, + current_shortstatehash, + is_dm, + is_encrypted, + is_invite, + room_type, + }) + } + + #[tracing::instrument(skip(self), fields(room_id = self.id.as_str()))] + fn matches_filter(&self, filter_data: &ListFilters) -> Result { + if let Some(is_dm) = filter_data.is_dm { + if self.is_dm != is_dm { + return Ok(false); + } + } + if let Some(is_encrypted) = filter_data.is_encrypted { + if self.is_encrypted != is_encrypted { + return Ok(false); + } + } + if let Some(is_invite) = filter_data.is_invite { + if self.is_invite != is_invite { + return Ok(false); + } + } + + let room_type = self.room_type.clone().into(); + if filter_data.not_room_types.contains(&room_type) { + return Ok(false); + } + if !filter_data.room_types.is_empty() + && !filter_data.room_types.contains(&room_type) + { + return Ok(false); + } + + Ok(true) + } +} + +#[tracing::instrument(skip_all)] +fn joined_rooms_data(sender_user: &UserId) -> Vec { + services() + .rooms + .state_cache + .rooms_joined(sender_user) + .filter_map(Result::ok) + .filter_map(move |id| { + RoomData::new(id.clone(), sender_user, None) + .inspect_err(|error| { + error!(%error, room_id = %id, "Failed to get data for room, skipping"); + }) + .ok() + }).collect() +} + +#[tracing::instrument(skip_all)] +fn invited_rooms_data(sender_user: &UserId) -> Vec { + services() + .rooms + .state_cache + .rooms_invited(sender_user) + .filter_map(Result::ok) + .filter_map(move |(id, invite_state)| { + RoomData::new(id.clone(), sender_user, Some(&invite_state)) + .inspect_err(|error| { + error!( + %error, room_id = %id, "Failed to get data for room, skipping" + ); + }) + .ok() + }) + .collect() +} + #[allow(clippy::too_many_lines)] #[tracing::instrument(skip_all, fields( pos, @@ -96,12 +271,7 @@ pub(crate) async fn sync_events_v5_route( let known_rooms = services().users.get_rooms_in_connection(connection_key.clone()); 
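// `RoomData::matches_filter()` above treats each boolean criterion as
// three-valued: `None` means "don't care", while `Some(x)` must equal the
// room's precomputed flag (the room-type filters add set-membership checks
// on top). A dependency-free rendering of the boolean part, using the same
// `Option::is_none_or` idiom as this series:

struct Flags {
    is_dm: bool,
    is_encrypted: bool,
    is_invite: bool,
}

fn matches(
    room: &Flags,
    is_dm: Option<bool>,
    is_encrypted: Option<bool>,
    is_invite: Option<bool>,
) -> bool {
    is_dm.is_none_or(|x| x == room.is_dm)
        && is_encrypted.is_none_or(|x| x == room.is_encrypted)
        && is_invite.is_none_or(|x| x == room.is_invite)
}

fn main() {
    let dm = Flags { is_dm: true, is_encrypted: true, is_invite: false };
    assert!(matches(&dm, Some(true), None, None));  // a "DMs" list
    assert!(!matches(&dm, None, None, Some(true))); // an "invites only" list
    assert!(matches(&dm, None, None, None));        // an unfiltered list
}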
- let all_joined_rooms = services() - .rooms - .state_cache - .rooms_joined(&sender_user) - .filter_map(Result::ok) - .collect::>(); + let all_joined_rooms = joined_rooms_data(&sender_user); if body.extensions.to_device.enabled.unwrap_or(false) { services().users.remove_to_device_events( @@ -118,21 +288,27 @@ pub(crate) async fn sync_events_v5_route( None }; - // and required state + let mut all_rooms = all_joined_rooms; + all_rooms.extend(invited_rooms_data(&sender_user)); + + let all_room_ids: Vec<_> = all_rooms.iter().map(|r| r.id.clone()).collect(); + let all_room_ids: Vec<_> = all_room_ids.iter().map(|id| &**id).collect(); + let mut todo_rooms: BTreeMap = BTreeMap::new(); let lists = body .lists .into_iter() - .filter_map(|(list_id, list)| { + .map(|(list_id, list)| { let rooms = rooms_in_list( &list_id, list, - &all_joined_rooms, + &all_rooms, + &all_room_ids, &known_rooms, &mut todo_rooms, - )?; - Some((list_id, rooms)) + ); + (list_id, rooms) }) .collect(); @@ -241,7 +417,7 @@ pub(crate) async fn sync_events_v5_route( async fn get_e2ee_data( sender_user: &UserId, globalsince: u64, - all_joined_rooms: &[OwnedRoomId], + all_joined_rooms: &[RoomData], ) -> Result { // Users that have left any encrypted rooms the sender was in let mut left_encrypted_users = HashSet::new(); @@ -253,14 +429,13 @@ async fn get_e2ee_data( .filter_map(Result::ok) .collect(); - for room_id in all_joined_rooms { - let Some(current_shortstatehash) = - services().rooms.state.get_room_shortstatehash(room_id)? - else { - error!(%room_id, "Room has no state"); - continue; - }; - + for RoomData { + id: room_id, + current_shortstatehash, + is_encrypted, + .. + } in all_joined_rooms + { let since_shortstatehash = services() .rooms .user @@ -288,19 +463,9 @@ async fn get_e2ee_data( .ok() }); - let encrypted_room = services() - .rooms - .state_accessor - .state_get( - current_shortstatehash, - &StateEventType::RoomEncryption, - "", - )? 
- .is_some(); - if let Some(since_shortstatehash) = since_shortstatehash { // Skip if there are only timeline changes - if since_shortstatehash == current_shortstatehash { + if since_shortstatehash == *current_shortstatehash { continue; } @@ -316,12 +481,12 @@ async fn get_e2ee_data( }); let new_encrypted_room = - encrypted_room && since_encryption.is_none(); - if encrypted_room { + *is_encrypted && since_encryption.is_none(); + if *is_encrypted { let current_state_ids = services() .rooms .state_accessor - .state_full_ids(current_shortstatehash) + .state_full_ids(*current_shortstatehash) .await?; let since_state_ids = services() .rooms @@ -465,45 +630,59 @@ async fn get_e2ee_data( fn rooms_in_list( list_id: &str, list: sync_events::v5::request::List, - all_joined_rooms: &[OwnedRoomId], + all_rooms: &[RoomData], + all_room_ids: &[&RoomId], known_rooms: &BTreeMap, todo_rooms: &mut BTreeMap, -) -> Option { +) -> sync_events::v5::response::List { trace!(list_id, ?list, "Collecting rooms in list"); - if list.filters.and_then(|f| f.is_invite).unwrap_or(false) { - return None; + let matching_room_ids_buf: Vec<&RoomId>; + let matching_room_ids = if let Some(filters) = list.filters.as_ref() { + matching_room_ids_buf = all_rooms + .iter() + .filter_map(|r| { + match r.matches_filter(filters) { + Ok(pass) => pass.then_some(&*r.id), + Err(error) => { + warn!(%error, ?filters, room_id=r.id.as_str(), "Failed to evaluate list filter, skipping room"); + None + } + } + }) + .collect(); + matching_room_ids_buf.as_slice() + } else { + all_room_ids + }; + + if !matching_room_ids.is_empty() { + let mut list_room_ids: BTreeSet<&RoomId> = BTreeSet::new(); + for (from, to) in list.ranges { + let from = usize::try_from(from) + .unwrap_or(usize::MAX) + .clamp(0, matching_room_ids.len() - 1); + let to = usize::try_from(to) + .unwrap_or(usize::MAX) + .clamp(from, matching_room_ids.len() - 1); + list_room_ids.extend(&matching_room_ids[from..=to]); + } + for room_id in list_room_ids { + todo_rooms.entry(room_id.to_owned()).or_default().update( + list.room_details.required_state.clone(), + list.room_details.timeline_limit, + known_rooms, + room_id, + ); + } } - let mut list_room_ids: BTreeSet = BTreeSet::new(); - for (mut from, mut to) in list.ranges { - from = from.clamp( - uint!(0), - UInt::try_from(all_joined_rooms.len() - 1).unwrap_or(UInt::MAX), - ); - to = to.clamp( - from, - UInt::try_from(all_joined_rooms.len() - 1).unwrap_or(UInt::MAX), - ); - let room_ids = all_joined_rooms[from.try_into().unwrap_or(usize::MAX) - ..=to.try_into().unwrap_or(usize::MAX)] - .to_vec(); - list_room_ids.extend(room_ids); - } - for room_id in &list_room_ids { - todo_rooms.entry(room_id.clone()).or_default().update( - list.room_details.required_state.clone(), - list.room_details.timeline_limit, - known_rooms, - room_id, - ); - } - let num_rooms = list_room_ids.len(); + let num_rooms = matching_room_ids.len(); trace!(list_id, num_rooms, "Done collecting rooms"); - Some(sync_events::v5::response::List { + sync_events::v5::response::List { count: UInt::try_from(num_rooms).unwrap_or(UInt::MAX), - }) + } } #[allow(clippy::too_many_lines)] From 891eb410cc15291d17505d41039daf39e204873b Mon Sep 17 00:00:00 2001 From: Lambda Date: Sun, 30 Mar 2025 13:15:33 +0000 Subject: [PATCH 18/33] SSS: implement state filtering --- src/api/client_server/sync/msc4186.rs | 228 ++++++++++++++++++++++++-- 1 file changed, 212 insertions(+), 16 deletions(-) diff --git a/src/api/client_server/sync/msc4186.rs b/src/api/client_server/sync/msc4186.rs index 
a50dda46..a3726faa 100644 --- a/src/api/client_server/sync/msc4186.rs +++ b/src/api/client_server/sync/msc4186.rs @@ -45,9 +45,111 @@ use crate::{ services, Ar, Error, Ra, Result, }; +#[derive(Debug)] +enum RequiredStateKeys { + All, + Selected(BTreeSet), +} +impl RequiredStateKeys { + fn merge(&mut self, key: String) { + match self { + RequiredStateKeys::All => { + // nothing to do, we're already getting all keys + } + RequiredStateKeys::Selected(keys) => { + if key == "*" { + *self = RequiredStateKeys::All; + } else { + keys.insert(key); + } + } + } + } +} + +#[derive(Debug)] +struct RequiredState { + /// Indicates that a `("*", "*")` tuple was present in `required_state`. + /// When `true`, all state events are sent by default, except for state + /// event types that are present in `filters`, for which only the + /// request state keys are sent. + all_events: bool, + filters: BTreeMap, +} +impl RequiredState { + fn update( + &mut self, + required_state: Vec<(StateEventType, String)>, + sender_user: &UserId, + ) { + let contains_wildcard = required_state + .iter() + .any(|(typ, key)| typ.to_string() == "*" && key == "*"); + + let mut old_filters = None; + if contains_wildcard { + if self.all_events { + // filters already contains existing negative filters, remember + // them and only apply new filters that were + // already there previously + old_filters = Some(std::mem::take(&mut self.filters)); + } else { + // clear existing positive filters + self.filters = BTreeMap::new(); + } + + self.all_events = true; + } else if self.all_events { + // all events were requested previously, don't add any additional + // positive filters + return; + } + + for (typ, mut key) in required_state { + if typ.to_string() == "*" { + continue; + } + if key == "$ME" { + key = sender_user.to_string(); + } + + if let Some(old_filters) = old_filters.as_mut() { + // re-insert the old negative filter if it matches the new + // negative filter exactly + if let Some(old_filter) = old_filters.remove(&typ) { + if let RequiredStateKeys::Selected(state_keys) = &old_filter + { + if state_keys.len() == 1 && state_keys.contains(&key) { + self.filters.insert(typ, old_filter); + } + } + } + } else { + // add the key to the filter for this event type + self.filters + .entry(typ) + .or_insert_with(|| { + RequiredStateKeys::Selected(BTreeSet::new()) + }) + .merge(key); + } + } + } + + fn matches(&self, typ: &StateEventType, key: &str) -> bool { + match self.filters.get(typ) { + Some(keys) => match keys { + RequiredStateKeys::All => true, + RequiredStateKeys::Selected(keys) => keys.contains(key), + }, + None => self.all_events, + } + } +} + #[derive(Debug)] struct TodoRoom { - required_state_request: BTreeSet<(StateEventType, String)>, + required_state: RequiredState, timeline_limit: u64, roomsince: u64, } @@ -58,8 +160,10 @@ impl TodoRoom { timeline_limit: UInt, known_rooms: &BTreeMap, room_id: &RoomId, + sender_user: &UserId, ) { - self.required_state_request.extend(required_state); + self.required_state.update(required_state, sender_user); + self.timeline_limit = self.timeline_limit.max(u64::from(timeline_limit).min(100)); // 0 means unknown because it got out of date @@ -70,7 +174,10 @@ impl TodoRoom { impl Default for TodoRoom { fn default() -> Self { Self { - required_state_request: BTreeSet::new(), + required_state: RequiredState { + all_events: false, + filters: BTreeMap::new(), + }, timeline_limit: 0, roomsince: u64::MAX, } @@ -307,6 +414,7 @@ pub(crate) async fn sync_events_v5_route( &all_room_ids, &known_rooms, &mut 
todo_rooms, + &sender_user, ); (list_id, rooms) }) @@ -322,6 +430,7 @@ pub(crate) async fn sync_events_v5_route( room.timeline_limit, &known_rooms, room_id, + &sender_user, ); } @@ -634,6 +743,7 @@ fn rooms_in_list( all_room_ids: &[&RoomId], known_rooms: &BTreeMap, todo_rooms: &mut BTreeMap, + sender_user: &UserId, ) -> sync_events::v5::response::List { trace!(list_id, ?list, "Collecting rooms in list"); @@ -673,6 +783,7 @@ fn rooms_in_list( list.room_details.timeline_limit, known_rooms, room_id, + sender_user, ); } } @@ -721,19 +832,104 @@ fn process_room( let room_events: Vec<_> = timeline_pdus.iter().map(|(_, pdu)| pdu.to_sync_room_event()).collect(); - let required_state = todo_room - .required_state_request - .iter() - .filter_map(|state| { - services() - .rooms - .state_accessor - .room_state_get(room_id, &state.0, &state.1) - .ok() - .flatten() - .map(|state| state.to_sync_state_event()) - }) - .collect(); + let Some(current_shortstatehash) = + services().rooms.state.get_room_shortstatehash(room_id)? + else { + error!(%room_id, "Room has no state"); + return Ok(None); + }; + + let need_scan = todo_room.required_state.all_events + || todo_room + .required_state + .filters + .iter() + .any(|(_, keys)| matches!(keys, RequiredStateKeys::All)); + let required_state = if need_scan { + let full_state = services() + .rooms + .state_compressor + .load_shortstatehash_info(current_shortstatehash)? + .pop() + .expect("there is always one layer") + .full_state; + full_state + .iter() + .filter_map(|compressed| { + let Ok((typ, key)) = services() + .rooms + .short + .get_statekey_from_short(compressed.state) + else { + warn!( + ?compressed, + "Failed to get info for shortstatekey, skipping" + ); + return None; + }; + + if !todo_room.required_state.matches(&typ, &key) { + return None; + } + + let shorteventid = compressed.event; + let pdu = match services() + .rooms + .short + .get_eventid_from_short(shorteventid) + { + Ok(event_id) => { + services().rooms.timeline.get_pdu(&event_id) + } + Err(error) => { + warn!( + %error, + %typ, + key, + ?shorteventid, + "Failed to get event ID from short event ID" + ); + return None; + } + }; + match pdu { + Ok(Some(pdu)) => Some(pdu.to_sync_state_event()), + Ok(None) => None, + Err(error) => { + warn!(%error, %typ, key, "Failed to get state PDU"); + None + } + } + }) + .collect() + } else { + todo_room + .required_state + .filters + .iter() + .flat_map(|(typ, keys)| { + let RequiredStateKeys::Selected(keys) = keys else { + panic!( + "wildcard key should have triggered a full state scan" + ); + }; + keys.iter().filter_map(move |key| { + match services().rooms.state_accessor.state_get( + current_shortstatehash, + typ, + key, + ) { + Ok(Some(pdu)) => Some(pdu.to_sync_state_event()), + Ok(None) => None, + Err(error) => { + warn!(%error, %typ, key, "Failed to get state PDU"); + None + } + } + }) + }) + .collect() + }; // Heroes let heroes = services() From 166a2690349f0ba6deffaf25c2cf42a4d4568e0c Mon Sep 17 00:00:00 2001 From: Lambda Date: Fri, 16 May 2025 18:45:12 +0000 Subject: [PATCH 19/33] SSS: fix timeline pagination --- src/api/client_server/sync/msc4186.rs | 6 ++- src/service/users.rs | 70 ++++++++++++++++++++++++--- 2 files changed, 67 insertions(+), 9 deletions(-) diff --git a/src/api/client_server/sync/msc4186.rs b/src/api/client_server/sync/msc4186.rs index a3726faa..5b210702 100644 --- a/src/api/client_server/sync/msc4186.rs +++ b/src/api/client_server/sync/msc4186.rs @@ -375,8 +375,9 @@ pub(crate) async fn sync_events_v5_route( 
services().users.forget_sync_request_connection(&connection_key); } - let known_rooms = - services().users.get_rooms_in_connection(connection_key.clone()); + let known_rooms = services() + .users + .get_rooms_in_connection(connection_key.clone(), globalsince); let all_joined_rooms = joined_rooms_data(&sender_user); @@ -438,6 +439,7 @@ pub(crate) async fn sync_events_v5_route( connection_key.clone(), todo_rooms.keys().cloned().collect(), globalsince, + next_batch, ); let mut rooms = BTreeMap::new(); diff --git a/src/service/users.rs b/src/service/users.rs index 928843ae..192b0597 100644 --- a/src/service/users.rs +++ b/src/service/users.rs @@ -19,9 +19,24 @@ mod data; pub(crate) use data::Data; +#[derive(Debug)] +struct KnownRooms { + /// The `pos` value of the request that these `room_since` values apply to + pos: u64, + /// `since` values for rooms that have been sent previously + room_since: BTreeMap, +} + #[derive(Debug, Default)] pub(crate) struct SlidingSyncCache { - known_rooms: BTreeMap, + /// `since` values for rooms in the current/previous request. Needed in + /// case the response doesn't arrive and the client requests the same + /// `pos` value again. + current_known_rooms: Option, + /// Overlay on top of `current_known_rooms` of `since` values for rooms in + /// the expected next request (where the `pos` value should be the + /// `pos` value from our response). + next_known_rooms: Option, } #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] @@ -66,28 +81,69 @@ impl Service { self.connections.lock().unwrap().remove(connection_key); } + #[tracing::instrument(skip(self))] pub(crate) fn get_rooms_in_connection( &self, connection_key: ConnectionKey, + pos: u64, ) -> BTreeMap { let cached = self.get_cache_entry(connection_key); - let cached = cached.lock().unwrap(); + let mut cached = cached.lock().unwrap(); + let cached = &mut *cached; - cached.known_rooms.clone() + let current_known_rooms = + cached.current_known_rooms.get_or_insert(KnownRooms { + pos, + room_since: BTreeMap::new(), + }); + + if current_known_rooms.pos == pos { + // Another request for a previous `pos` value, invalidate the next + // result + cached.next_known_rooms = None; + } else if let Some(next_known_rooms) = + cached.next_known_rooms.take().filter(|x| x.pos == pos) + { + // Merge overlay into current_known_rooms + current_known_rooms.pos = next_known_rooms.pos; + current_known_rooms.room_since.extend(next_known_rooms.room_since); + } else { + // Not a repeated request, and we don't have calculated values for a + // next request, start over + *current_known_rooms = KnownRooms { + pos, + room_since: BTreeMap::new(), + }; + } + + current_known_rooms.room_since.clone() } + #[tracing::instrument(skip(self))] pub(crate) fn update_sync_known_rooms( &self, connection_key: ConnectionKey, new_cached_rooms: BTreeSet, - globalsince: u64, + pos: u64, + next_batch: u64, ) { let cached = self.get_cache_entry(connection_key); let mut cached = cached.lock().unwrap(); - for roomid in new_cached_rooms { - cached.known_rooms.insert(roomid, globalsince); - } + assert_eq!( + cached.current_known_rooms.as_ref().map(|x| x.pos), + Some(pos), + "current known rooms should match current request's pos" + ); + + // Add an overlay to the current request's known rooms + cached.next_known_rooms = Some(KnownRooms { + pos: next_batch, + room_since: new_cached_rooms + .into_iter() + .map(|room_id| (room_id, next_batch)) + .collect(), + }); } /// Check if account is deactivated From a2bbfd124229800cbab7eb08bd6f4cae19d07431 Mon Sep 17 
00:00:00 2001 From: Lambda Date: Tue, 20 May 2025 17:02:22 +0000 Subject: [PATCH 20/33] SSS: Fix room names --- src/api/client_server/sync/msc4186.rs | 84 +++++++++++---------------- 1 file changed, 33 insertions(+), 51 deletions(-) diff --git a/src/api/client_server/sync/msc4186.rs b/src/api/client_server/sync/msc4186.rs index 5b210702..72ce3a24 100644 --- a/src/api/client_server/sync/msc4186.rs +++ b/src/api/client_server/sync/msc4186.rs @@ -11,8 +11,9 @@ use std::{ use ruma::{ api::client::{ sync::sync_events::{ - self, v5::request::ListFilters, DeviceLists, - UnreadNotificationsCount, + self, + v5::{request::ListFilters, response::Hero}, + DeviceLists, UnreadNotificationsCount, }, uiaa::UiaaResponse, }, @@ -933,55 +934,37 @@ fn process_room( .collect() }; - // Heroes - let heroes = services() - .rooms - .state_cache - .room_members(room_id) - .filter_map(Result::ok) - .filter(|member| member != sender_user) - .filter_map(|member| { - services() - .rooms - .state_accessor - .get_member(room_id, &member) - .ok() - .flatten() - .map(|memberevent| { - ( - memberevent - .displayname - .unwrap_or_else(|| member.to_string()), - memberevent.avatar_url, - ) - }) - }) - .take(5) - .collect::>(); - let name = match &*heroes { - [] => None, - [(only, _)] => Some(only.clone()), - [firsts @ .., (last, _)] => Some({ - let firsts = firsts - .iter() - .map(|(name, _)| name.clone()) - .collect::>() - .join(", "); - - format!("{firsts} and {last}") - }), - }; + let name = services().rooms.state_accessor.get_name(room_id)?; + let heroes = name.is_none().then(|| { + services() + .rooms + .state_cache + .room_members(room_id) + .filter_map(Result::ok) + .filter(|member| member != sender_user) + .filter_map(|member| { + services() + .rooms + .state_accessor + .get_member(room_id, &member) + .ok() + .flatten() + .map(|memberevent| Hero { + user_id: member, + name: memberevent.displayname, + avatar: memberevent.avatar_url, + }) + }) + .take(5) + .collect::>() + }); let room = sync_events::v5::response::Room { - name: services().rooms.state_accessor.get_name(room_id)?.or(name), - avatar: if let [(_name, Some(avatar))] = &*heroes { - JsOption::Some(avatar.clone()) - } else { - match services().rooms.state_accessor.get_avatar(room_id)? { - JsOption::Some(avatar) => JsOption::from_option(avatar.url), - JsOption::Null => JsOption::Null, - JsOption::Undefined => JsOption::Undefined, - } + name, + avatar: match services().rooms.state_accessor.get_avatar(room_id)? 
{ + JsOption::Some(avatar) => JsOption::from_option(avatar.url), + JsOption::Null => JsOption::Null, + JsOption::Undefined => JsOption::Undefined, }, initial: Some(todo_room.roomsince == 0), is_dm: None, @@ -1028,8 +1011,7 @@ fn process_room( num_live: None, // TODO bump_stamp: None, - // TODO - heroes: None, + heroes, }; trace!(?room, "Built room data"); From adff2ec6373a8872848f036928d05e5c641e78a5 Mon Sep 17 00:00:00 2001 From: Lambda Date: Tue, 20 May 2025 17:44:56 +0000 Subject: [PATCH 21/33] SSS: implement bump_stamp --- src/api/client_server/sync/msc4186.rs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/api/client_server/sync/msc4186.rs b/src/api/client_server/sync/msc4186.rs index 72ce3a24..013f91a0 100644 --- a/src/api/client_server/sync/msc4186.rs +++ b/src/api/client_server/sync/msc4186.rs @@ -832,6 +832,17 @@ fn process_room( (todo_room.roomsince != 0).then(|| todo_room.roomsince.to_string()) }); + // TODO: consider only message-like PDUs here, rather than all PDUs + let bump_stamp = match services() + .rooms + .timeline + .last_timeline_count(sender_user, room_id)? + { + PduCount::Backfilled(n) | PduCount::Normal(n) => { + Some(UInt::new_saturating(n)) + } + }; + let room_events: Vec<_> = timeline_pdus.iter().map(|(_, pdu)| pdu.to_sync_room_event()).collect(); @@ -1009,8 +1020,7 @@ fn process_room( ), // Count events in timeline greater than global sync counter num_live: None, - // TODO - bump_stamp: None, + bump_stamp, heroes, }; trace!(?room, "Built room data"); From 30951cb6110adaf7587be919c3ce896b1e936bd6 Mon Sep 17 00:00:00 2001 From: Lambda Date: Wed, 28 May 2025 19:57:26 +0000 Subject: [PATCH 22/33] SSS: factor out to_device --- src/api/client_server/sync/msc4186.rs | 37 +++++++++++++++------------ 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/src/api/client_server/sync/msc4186.rs b/src/api/client_server/sync/msc4186.rs index 013f91a0..e68bd073 100644 --- a/src/api/client_server/sync/msc4186.rs +++ b/src/api/client_server/sync/msc4186.rs @@ -43,7 +43,7 @@ use crate::{ }, users::ConnectionKey, }, - services, Ar, Error, Ra, Result, + services, utils, Ar, Error, Ra, Result, }; #[derive(Debug)] @@ -443,6 +443,25 @@ pub(crate) async fn sync_events_v5_route( next_batch, ); + let to_device = if body.extensions.to_device.enabled == Some(true) { + let events = services() + .users + .get_to_device_events(&sender_user, &sender_device)?; + if !events.is_empty() { + debug!( + events = utils::debug_slice_truncated(&events, 3), + "Got new to-device events" + ); + } + + Some(sync_events::v5::response::ToDevice { + events, + next_batch: next_batch.to_string(), + }) + } else { + None + }; + let mut rooms = BTreeMap::new(); for (room_id, todo_room) in todo_rooms { if let Some(room) = process_room(&sender_user, &room_id, &todo_room)? 
{ @@ -472,21 +491,7 @@ pub(crate) async fn sync_events_v5_route( lists, rooms, extensions: sync_events::v5::response::Extensions { - to_device: body - .extensions - .to_device - .enabled - .unwrap_or(false) - .then(|| { - services() - .users - .get_to_device_events(&sender_user, &sender_device) - .map(|events| sync_events::v5::response::ToDevice { - events, - next_batch: next_batch.to_string(), - }) - }) - .transpose()?, + to_device, e2ee: sync_events::v5::response::E2EE { device_lists: device_lists.unwrap_or_default(), device_one_time_keys_count: services() From f5ff294c3e8167db8929db43e7aa66f386cb9dfa Mon Sep 17 00:00:00 2001 From: Lambda Date: Sat, 31 May 2025 17:31:36 +0000 Subject: [PATCH 23/33] SSS: refactor E2EE extension like other extensions --- src/api/client_server/sync/msc4186.rs | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/src/api/client_server/sync/msc4186.rs b/src/api/client_server/sync/msc4186.rs index e68bd073..7c6b55fd 100644 --- a/src/api/client_server/sync/msc4186.rs +++ b/src/api/client_server/sync/msc4186.rs @@ -391,8 +391,20 @@ pub(crate) async fn sync_events_v5_route( } #[allow(clippy::if_then_some_else_none)] - let device_lists = if body.extensions.e2ee.enabled.unwrap_or(false) { - Some(get_e2ee_data(&sender_user, globalsince, &all_joined_rooms).await?) + let e2ee = if body.extensions.e2ee.enabled == Some(true) { + Some(sync_events::v5::response::E2EE { + device_lists: get_e2ee_data( + &sender_user, + globalsince, + &all_joined_rooms, + ) + .await?, + device_one_time_keys_count: services() + .users + .count_one_time_keys(&sender_user, &sender_device)?, + // Fallback keys are not yet supported + device_unused_fallback_key_types: None, + }) } else { None }; @@ -492,14 +504,7 @@ pub(crate) async fn sync_events_v5_route( rooms, extensions: sync_events::v5::response::Extensions { to_device, - e2ee: sync_events::v5::response::E2EE { - device_lists: device_lists.unwrap_or_default(), - device_one_time_keys_count: services() - .users - .count_one_time_keys(&sender_user, &sender_device)?, - // Fallback keys are not yet supported - device_unused_fallback_key_types: None, - }, + e2ee: e2ee.unwrap_or_default(), account_data: sync_events::v5::response::AccountData { global: if body.extensions.account_data.enabled.unwrap_or(false) { From 395d16ca2262a10351518201248be36138295472 Mon Sep 17 00:00:00 2001 From: Lambda Date: Sat, 31 May 2025 22:21:43 +0000 Subject: [PATCH 24/33] SSS: factor out account data extension --- src/api/client_server/sync/msc4186.rs | 38 +++++++++++++-------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/src/api/client_server/sync/msc4186.rs b/src/api/client_server/sync/msc4186.rs index 7c6b55fd..9d8aef8f 100644 --- a/src/api/client_server/sync/msc4186.rs +++ b/src/api/client_server/sync/msc4186.rs @@ -474,6 +474,24 @@ pub(crate) async fn sync_events_v5_route( None }; + let account_data = if body.extensions.account_data.enabled == Some(true) { + let global = services() + .account_data + .global_changes_since(&sender_user, globalsince)? + .into_iter() + .map(|(event_type, content)| { + account_data::raw_global_event_from_parts(&event_type, &content) + }) + .collect(); + + Some(sync_events::v5::response::AccountData { + global, + rooms: BTreeMap::new(), + }) + } else { + None + }; + let mut rooms = BTreeMap::new(); for (room_id, todo_room) in todo_rooms { if let Some(room) = process_room(&sender_user, &room_id, &todo_room)? 
{ @@ -505,25 +523,7 @@ pub(crate) async fn sync_events_v5_route( extensions: sync_events::v5::response::Extensions { to_device, e2ee: e2ee.unwrap_or_default(), - account_data: sync_events::v5::response::AccountData { - global: if body.extensions.account_data.enabled.unwrap_or(false) - { - services() - .account_data - .global_changes_since(&sender_user, globalsince)? - .into_iter() - .map(|(event_type, content)| { - account_data::raw_global_event_from_parts( - &event_type, - &content, - ) - }) - .collect() - } else { - Vec::new() - }, - rooms: BTreeMap::new(), - }, + account_data: account_data.unwrap_or_default(), receipts: sync_events::v5::response::Receipts { rooms: BTreeMap::new(), }, From d497a19c2df06a27e71f3cec9614807b5a3e1528 Mon Sep 17 00:00:00 2001 From: Lambda Date: Wed, 28 May 2025 19:57:52 +0000 Subject: [PATCH 25/33] SSS: don't block if there is new extension data --- src/api/client_server/sync/msc4186.rs | 34 ++++++++++++++++++--------- 1 file changed, 23 insertions(+), 11 deletions(-) diff --git a/src/api/client_server/sync/msc4186.rs b/src/api/client_server/sync/msc4186.rs index 9d8aef8f..7c7cee3a 100644 --- a/src/api/client_server/sync/msc4186.rs +++ b/src/api/client_server/sync/msc4186.rs @@ -499,9 +499,31 @@ pub(crate) async fn sync_events_v5_route( } } + let extensions = sync_events::v5::response::Extensions { + to_device, + e2ee: e2ee.unwrap_or_default(), + account_data: account_data.unwrap_or_default(), + receipts: sync_events::v5::response::Receipts { + rooms: BTreeMap::new(), + }, + typing: sync_events::v5::response::Typing { + rooms: BTreeMap::new(), + }, + }; + + let extensions_empty = extensions + .to_device + .as_ref() + .is_none_or(|to_device| to_device.events.is_empty()) + && extensions.e2ee.device_lists.is_empty() + && extensions.account_data.is_empty() + && extensions.receipts.is_empty() + && extensions.typing.is_empty(); + if rooms .iter() .all(|(_, r)| r.timeline.is_empty() && r.required_state.is_empty()) + && extensions_empty { // Hang a few seconds so requests are not spammed // Stop hanging if new info arrives @@ -520,17 +542,7 @@ pub(crate) async fn sync_events_v5_route( pos: next_batch.to_string(), lists, rooms, - extensions: sync_events::v5::response::Extensions { - to_device, - e2ee: e2ee.unwrap_or_default(), - account_data: account_data.unwrap_or_default(), - receipts: sync_events::v5::response::Receipts { - rooms: BTreeMap::new(), - }, - typing: sync_events::v5::response::Typing { - rooms: BTreeMap::new(), - }, - }, + extensions, })) } From 72c898dd412f0d8357b98542d214dff4b479909b Mon Sep 17 00:00:00 2001 From: Lambda Date: Mon, 19 May 2025 20:08:30 +0000 Subject: [PATCH 26/33] SSS: implement room account data --- src/api/client_server/sync/msc4186.rs | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/src/api/client_server/sync/msc4186.rs b/src/api/client_server/sync/msc4186.rs index 7c7cee3a..d65cb6c7 100644 --- a/src/api/client_server/sync/msc4186.rs +++ b/src/api/client_server/sync/msc4186.rs @@ -484,9 +484,27 @@ pub(crate) async fn sync_events_v5_route( }) .collect(); + let mut rooms_account_data = BTreeMap::new(); + for (room_id, todo_room) in &todo_rooms { + let account_data: Vec<_> = services() + .account_data + .room_changes_since(&sender_user, room_id, todo_room.roomsince)? 
+ .into_iter() + .map(|(event_type, content)| { + account_data::raw_room_event_from_parts( + &event_type, + &content, + ) + }) + .collect(); + if !account_data.is_empty() { + rooms_account_data.insert(room_id.clone(), account_data); + } + } + Some(sync_events::v5::response::AccountData { global, - rooms: BTreeMap::new(), + rooms: rooms_account_data, }) } else { None From 96a9632438ce2ac2245e148b80c02f71fd703573 Mon Sep 17 00:00:00 2001 From: Lambda Date: Wed, 28 May 2025 18:19:30 +0000 Subject: [PATCH 27/33] SSS: implement receipts --- src/api/client_server/sync/msc4186.rs | 64 ++++++++++++++++++++++++--- 1 file changed, 59 insertions(+), 5 deletions(-) diff --git a/src/api/client_server/sync/msc4186.rs b/src/api/client_server/sync/msc4186.rs index d65cb6c7..31f5caf3 100644 --- a/src/api/client_server/sync/msc4186.rs +++ b/src/api/client_server/sync/msc4186.rs @@ -19,13 +19,15 @@ use ruma::{ }, events::{ direct::DirectEventContent, + receipt::ReceiptEventContent, room::{ create::RoomCreateEventContent, encryption::PossiblyRedactedRoomEncryptionEventContent, member::{MembershipState, RoomMemberEventContent}, }, - AnyStrippedStateEvent, PossiblyRedactedStateEventContent, - StateEventType, StrippedStateEvent, TimelineEventType, + AnyStrippedStateEvent, AnySyncEphemeralRoomEvent, + PossiblyRedactedStateEventContent, StateEventType, StrippedStateEvent, + SyncEphemeralRoomEvent, TimelineEventType, }, room::RoomType, serde::Raw, @@ -510,6 +512,60 @@ pub(crate) async fn sync_events_v5_route( None }; + #[allow(clippy::if_then_some_else_none)] + let receipts = if body.extensions.receipts.enabled == Some(true) { + let mut receipts = BTreeMap::new(); + for (room_id, todo_room) in &todo_rooms { + let mut event_content: BTreeMap<_, BTreeMap<_, BTreeMap<_, _>>> = + BTreeMap::new(); + for x in services() + .rooms + .edus + .read_receipt + .readreceipts_since(room_id, todo_room.roomsince) + { + let Ok((_user_id, _, edu)) = x else { + // invalid DB entry + continue; + }; + let Ok(edu) = edu.deserialize() else { + // invalid EDU JSON + continue; + }; + let AnySyncEphemeralRoomEvent::Receipt(edu) = edu else { + // wrong EDU type + continue; + }; + + // merge all receipt EDUs into one + for (event_id, receipts) in edu.content.0 { + let entry = event_content.entry(event_id).or_default(); + for (typ, receipts) in receipts { + let entry = entry.entry(typ).or_default(); + for (user, receipt) in receipts { + entry.insert(user, receipt); + } + } + } + } + + if !event_content.is_empty() { + let Ok(event) = Raw::new(&SyncEphemeralRoomEvent { + content: ReceiptEventContent(event_content), + }) else { + continue; + }; + receipts.insert(room_id.clone(), event); + } + } + + Some(sync_events::v5::response::Receipts { + rooms: receipts, + }) + } else { + None + }; + let mut rooms = BTreeMap::new(); for (room_id, todo_room) in todo_rooms { if let Some(room) = process_room(&sender_user, &room_id, &todo_room)? 
{ @@ -521,9 +577,7 @@ pub(crate) async fn sync_events_v5_route( to_device, e2ee: e2ee.unwrap_or_default(), account_data: account_data.unwrap_or_default(), - receipts: sync_events::v5::response::Receipts { - rooms: BTreeMap::new(), - }, + receipts: receipts.unwrap_or_default(), typing: sync_events::v5::response::Typing { rooms: BTreeMap::new(), }, From 8e3e1b1f8dc14bfe59cacb1aa3b8d560b507b2f6 Mon Sep 17 00:00:00 2001 From: Lambda Date: Thu, 29 May 2025 20:01:11 +0000 Subject: [PATCH 28/33] SSS: fix to-device event deletion This is the one major fix in SSS: only delete to-device events when the client confirms receipt of the previous response, not simply on every request! --- src/api/client_server/sync/msc4186.rs | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/src/api/client_server/sync/msc4186.rs b/src/api/client_server/sync/msc4186.rs index 31f5caf3..a0667eb5 100644 --- a/src/api/client_server/sync/msc4186.rs +++ b/src/api/client_server/sync/msc4186.rs @@ -384,14 +384,6 @@ pub(crate) async fn sync_events_v5_route( let all_joined_rooms = joined_rooms_data(&sender_user); - if body.extensions.to_device.enabled.unwrap_or(false) { - services().users.remove_to_device_events( - &sender_user, - &sender_device, - globalsince, - )?; - } - #[allow(clippy::if_then_some_else_none)] let e2ee = if body.extensions.e2ee.enabled == Some(true) { Some(sync_events::v5::response::E2EE { @@ -458,6 +450,17 @@ pub(crate) async fn sync_events_v5_route( ); let to_device = if body.extensions.to_device.enabled == Some(true) { + if let Some(until) = + body.extensions.to_device.since.and_then(|s| s.parse().ok()) + { + debug!(until, "Deleting to-device events"); + services().users.remove_to_device_events( + &sender_user, + &sender_device, + until, + )?; + } + let events = services() .users .get_to_device_events(&sender_user, &sender_device)?; From 7b4acd214ca18eb1c2f0452f42c3374f578ec3de Mon Sep 17 00:00:00 2001 From: Lambda Date: Thu, 29 May 2025 20:02:40 +0000 Subject: [PATCH 29/33] SSS: implement num_live --- src/api/client_server/sync/msc4186.rs | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/src/api/client_server/sync/msc4186.rs b/src/api/client_server/sync/msc4186.rs index a0667eb5..b7f423a5 100644 --- a/src/api/client_server/sync/msc4186.rs +++ b/src/api/client_server/sync/msc4186.rs @@ -571,7 +571,9 @@ pub(crate) async fn sync_events_v5_route( let mut rooms = BTreeMap::new(); for (room_id, todo_room) in todo_rooms { - if let Some(room) = process_room(&sender_user, &room_id, &todo_room)? { + if let Some(room) = + process_room(&sender_user, &room_id, &todo_room, globalsince)? 
+ { rooms.insert(room_id.clone(), room); } } @@ -902,6 +904,7 @@ fn process_room( sender_user: &UserId, room_id: &RoomId, todo_room: &TodoRoom, + globalsince: u64, ) -> Result> { let roomsincecount = PduCount::Normal(todo_room.roomsince); @@ -940,6 +943,25 @@ fn process_room( } }; + let num_live = Some( + timeline_pdus + .iter() + .filter(|(pdu_count, _)| match pdu_count { + // TODO check logic + PduCount::Backfilled(_) => false, + PduCount::Normal(pdu_count) => { + if globalsince == 0 { + false + } else { + *pdu_count > globalsince + } + } + }) + .count() + .try_into() + .unwrap_or(UInt::MAX), + ); + let room_events: Vec<_> = timeline_pdus.iter().map(|(_, pdu)| pdu.to_sync_room_event()).collect(); @@ -1115,8 +1137,7 @@ fn process_room( .map(UInt::new_saturating) .unwrap_or(uint!(0)), ), - // Count events in timeline greater than global sync counter - num_live: None, + num_live, bump_stamp, heroes, }; From f9b28364053f31e806d205828e213e28fd8d0c5e Mon Sep 17 00:00:00 2001 From: Lambda Date: Thu, 29 May 2025 20:02:54 +0000 Subject: [PATCH 30/33] SSS: don't set txn_id The field got dropped on the transition from MSC3575 to MSC4186, and including it anyway confuses matrix-rust-sdk into trying to use the (also removed) "sticky" parameters. --- src/api/client_server/sync/msc4186.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/client_server/sync/msc4186.rs b/src/api/client_server/sync/msc4186.rs index b7f423a5..74f8a397 100644 --- a/src/api/client_server/sync/msc4186.rs +++ b/src/api/client_server/sync/msc4186.rs @@ -615,7 +615,7 @@ pub(crate) async fn sync_events_v5_route( } Ok(Ra(sync_events::v5::Response { - txn_id: body.txn_id.clone(), + txn_id: None, pos: next_batch.to_string(), lists, rooms, From d51aea7672bceb0d165771a860fae0520a58784f Mon Sep 17 00:00:00 2001 From: Lambda Date: Thu, 29 May 2025 20:04:38 +0000 Subject: [PATCH 31/33] SSS: add TODOs --- src/api/client_server/sync/msc4186.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/api/client_server/sync/msc4186.rs b/src/api/client_server/sync/msc4186.rs index 74f8a397..1760f93f 100644 --- a/src/api/client_server/sync/msc4186.rs +++ b/src/api/client_server/sync/msc4186.rs @@ -393,6 +393,7 @@ pub(crate) async fn sync_events_v5_route( &all_joined_rooms, ) .await?, + // TODO: only include this field when it has changed device_one_time_keys_count: services() .users .count_one_time_keys(&sender_user, &sender_device)?, @@ -972,6 +973,12 @@ fn process_room( return Ok(None); }; + // TODO: invalidate current_shortstatehash and send down all(?) state events + // if effective requested required_state changes between requests: + // + // > If new entries are added to required_state then the server must send + // > down matching current state events. 
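+    //
+    // A rough sketch of one way to detect new entries (assumes the previous
+    // request's `RequiredState` is persisted in the per-connection cache as
+    // a hypothetical `prev_required_state`; state keys added under an
+    // existing event type would need a similar check):
+    //
+    //     let has_new_entries = (todo_room.required_state.all_events
+    //         && !prev_required_state.all_events)
+    //         || todo_room
+    //             .required_state
+    //             .filters
+    //             .keys()
+    //             .any(|typ| !prev_required_state.filters.contains_key(typ));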
+ let need_scan = todo_room.required_state.all_events || todo_room .required_state From 3f315ee68319749748062f89b5ec19797184c4d4 Mon Sep 17 00:00:00 2001 From: Lambda Date: Thu, 29 May 2025 21:37:08 +0000 Subject: [PATCH 32/33] SSS: sort rooms in lists by latest event arrival time --- src/api/client_server/sync/msc4186.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/api/client_server/sync/msc4186.rs b/src/api/client_server/sync/msc4186.rs index 1760f93f..9e021ef1 100644 --- a/src/api/client_server/sync/msc4186.rs +++ b/src/api/client_server/sync/msc4186.rs @@ -406,6 +406,9 @@ pub(crate) async fn sync_events_v5_route( let mut all_rooms = all_joined_rooms; all_rooms.extend(invited_rooms_data(&sender_user)); + all_rooms.sort_by_key(|r| { + services().rooms.timeline.last_timeline_count(&sender_user, &r.id).ok() + }); let all_room_ids: Vec<_> = all_rooms.iter().map(|r| r.id.clone()).collect(); let all_room_ids: Vec<_> = all_room_ids.iter().map(|id| &**id).collect(); From f903421c3fdf131d536ebe8336fe2e8fcd28daae Mon Sep 17 00:00:00 2001 From: Lambda Date: Sat, 31 May 2025 22:03:23 +0000 Subject: [PATCH 33/33] SSS: implement typing indications --- src/api/client_server/sync/msc4186.rs | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/src/api/client_server/sync/msc4186.rs b/src/api/client_server/sync/msc4186.rs index 9e021ef1..ab5fa762 100644 --- a/src/api/client_server/sync/msc4186.rs +++ b/src/api/client_server/sync/msc4186.rs @@ -573,6 +573,28 @@ pub(crate) async fn sync_events_v5_route( None }; + let typing = if body.extensions.typing.enabled == Some(true) { + let mut typing = BTreeMap::new(); + for room_id in todo_rooms.keys() { + if services().rooms.edus.typing.last_typing_update(room_id).await? + > globalsince + { + let event = + services().rooms.edus.typing.typings_all(room_id).await?; + + let Ok(event) = Raw::new(&event) else { + continue; + }; + typing.insert(room_id.clone(), event); + } + } + Some(sync_events::v5::response::Typing { + rooms: typing, + }) + } else { + None + }; + let mut rooms = BTreeMap::new(); for (room_id, todo_room) in todo_rooms { if let Some(room) = @@ -587,9 +609,7 @@ pub(crate) async fn sync_events_v5_route( e2ee: e2ee.unwrap_or_default(), account_data: account_data.unwrap_or_default(), receipts: receipts.unwrap_or_default(), - typing: sync_events::v5::response::Typing { - rooms: BTreeMap::new(), - }, + typing: typing.unwrap_or_default(), }; let extensions_empty = extensions