//! [MSC3575], aka Sliding Sync, aka Sync v3 (even though the endpoint is
//! called /v4) support
//!
//! [MSC3575]: https://github.com/matrix-org/matrix-spec-proposals/pull/3575

use std::{
    collections::{BTreeMap, BTreeSet, HashSet},
    time::Duration,
};

use ruma::{
    api::client::{
        sync::sync_events::{
            self, v4::SlidingOp, DeviceLists, UnreadNotificationsCount,
        },
        uiaa::UiaaResponse,
    },
    events::{
        room::member::{MembershipState, RoomMemberEventContent},
        StateEventType, TimelineEventType,
    },
    uint, JsOption, OwnedRoomId, RoomId, UInt, UserId,
};
use tracing::{debug, error};

use super::{load_timeline, share_encrypted_room};
use crate::{
    service::{account_data, rooms::timeline::PduCount},
    services, Ar, Error, Ra, Result,
};

/// Per-room parameters accumulated across every list and room subscription
/// that requested the room
struct TodoRoom {
    required_state_request: BTreeSet<(StateEventType, String)>,
    timeline_limit: u64,
    roomsince: u64,
}

impl TodoRoom {
    fn update(
        &mut self,
        required_state: Vec<(StateEventType, String)>,
        timeline_limit: UInt,
        known_rooms: Option<&BTreeMap<OwnedRoomId, u64>>,
        room_id: &RoomId,
    ) {
        self.required_state_request.extend(required_state);
        self.timeline_limit =
            self.timeline_limit.max(u64::from(timeline_limit).min(100));
        // 0 means unknown because it got out of date
        self.roomsince = self.roomsince.min(
            known_rooms.and_then(|k| k.get(room_id)).copied().unwrap_or(0),
        );
    }
}

impl Default for TodoRoom {
    fn default() -> Self {
        Self {
            required_state_request: BTreeSet::new(),
            timeline_limit: 0,
            roomsince: u64::MAX,
        }
    }
}

#[allow(clippy::too_many_lines)]
pub(crate) async fn sync_events_v4_route(
    body: Ar<sync_events::v4::Request>,
) -> Result<Ra<sync_events::v4::Response>, Ra<UiaaResponse>> {
    let sender_user = body.sender_user.expect("user is authenticated");
    let sender_device = body.sender_device.expect("user is authenticated");
    let mut body = body.body;

    // Setup watchers, so if there's no response, we can wait for them
    let watcher = services().globals.watch(&sender_user, &sender_device);

    let next_batch = services().globals.next_count()?;

    let globalsince =
        body.pos.as_ref().and_then(|string| string.parse().ok()).unwrap_or(0);

    if globalsince == 0 {
        if let Some(conn_id) = &body.conn_id {
            services().users.forget_sync_request_connection(
                sender_user.clone(),
                sender_device.clone(),
                conn_id.clone(),
            );
        }
    }

    // Get sticky parameters from cache
    let known_rooms = services().users.update_sync_request_with_cache(
        sender_user.clone(),
        sender_device.clone(),
        &mut body,
    );

    let all_joined_rooms = services()
        .rooms
        .state_cache
        .rooms_joined(&sender_user)
        .filter_map(Result::ok)
        .collect::<Vec<_>>();

    if body.extensions.to_device.enabled.unwrap_or(false) {
        services().users.remove_to_device_events(
            &sender_user,
            &sender_device,
            globalsince,
        )?;
    }

    // Users that have left any encrypted rooms the sender was in
    let mut left_encrypted_users = HashSet::new();
    let mut device_list_changes = HashSet::new();
    let mut device_list_left = HashSet::new();

    if body.extensions.e2ee.enabled.unwrap_or(false) {
        // Look for device list updates of this account
        device_list_changes.extend(
            services()
                .users
                .keys_changed(sender_user.as_ref(), globalsince, None)
                .filter_map(Result::ok),
        );

        for room_id in &all_joined_rooms {
            let Some(current_shortstatehash) =
                services().rooms.state.get_room_shortstatehash(room_id)?
            else {
                error!(%room_id, "Room has no state");
                continue;
            };

            let since_shortstatehash = services()
                .rooms
                .user
                .get_token_shortstatehash(room_id, globalsince)?;

            let since_sender_member: Option<RoomMemberEventContent> =
                since_shortstatehash
                    .and_then(|shortstatehash| {
                        services()
                            .rooms
                            .state_accessor
                            .state_get(
                                shortstatehash,
                                &StateEventType::RoomMember,
                                sender_user.as_str(),
                            )
                            .transpose()
                    })
                    .transpose()?
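                    // Deserialize the sender's membership at the `since`
                    // point; `None` means the sender had no member event
                    // there, and invalid content is treated as absent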
                    .and_then(|pdu| {
                        serde_json::from_str(pdu.content.get())
                            .map_err(|_| {
                                Error::bad_database("Invalid PDU in database.")
                            })
                            .ok()
                    });

            let encrypted_room = services()
                .rooms
                .state_accessor
                .state_get(
                    current_shortstatehash,
                    &StateEventType::RoomEncryption,
                    "",
                )?
                .is_some();

            if let Some(since_shortstatehash) = since_shortstatehash {
                // Skip if there are only timeline changes
                if since_shortstatehash == current_shortstatehash {
                    continue;
                }

                let since_encryption =
                    services().rooms.state_accessor.state_get(
                        since_shortstatehash,
                        &StateEventType::RoomEncryption,
                        "",
                    )?;

                let joined_since_last_sync =
                    since_sender_member.is_none_or(|member| {
                        member.membership != MembershipState::Join
                    });

                let new_encrypted_room =
                    encrypted_room && since_encryption.is_none();

                if encrypted_room {
                    let current_state_ids = services()
                        .rooms
                        .state_accessor
                        .state_full_ids(current_shortstatehash)
                        .await?;
                    let since_state_ids = services()
                        .rooms
                        .state_accessor
                        .state_full_ids(since_shortstatehash)
                        .await?;

                    // Diff the two state snapshots and inspect every member
                    // event that changed in between
                    for (key, event_id) in current_state_ids {
                        if since_state_ids.get(&key) != Some(&event_id) {
                            let Some(pdu) =
                                services().rooms.timeline.get_pdu(&event_id)?
                            else {
                                error!(%event_id, "Event in state not found");
                                continue;
                            };
                            if pdu.kind == TimelineEventType::RoomMember {
                                if let Some(state_key) = &pdu.state_key {
                                    let user_id =
                                        UserId::parse(state_key.clone())
                                            .map_err(|_| {
                                                Error::bad_database(
                                                    "Invalid UserId in \
                                                     member PDU.",
                                                )
                                            })?;

                                    if user_id == sender_user {
                                        continue;
                                    }

                                    let new_membership =
                                        serde_json::from_str::<
                                            RoomMemberEventContent,
                                        >(
                                            pdu.content.get()
                                        )
                                        .map_err(|_| {
                                            Error::bad_database(
                                                "Invalid PDU in database.",
                                            )
                                        })?
                                        .membership;

                                    match new_membership {
                                        MembershipState::Join => {
                                            // A new user joined an encrypted
                                            // room
                                            if !share_encrypted_room(
                                                &sender_user,
                                                &user_id,
                                                room_id,
                                            )? {
                                                device_list_changes
                                                    .insert(user_id);
                                            }
                                        }
                                        MembershipState::Leave => {
                                            // Write down users that have left
                                            // encrypted rooms we are in
                                            left_encrypted_users
                                                .insert(user_id);
                                        }
                                        _ => {}
                                    }
                                }
                            }
                        }
                    }

                    if joined_since_last_sync || new_encrypted_room {
                        // If the user is in a new encrypted room, give them
                        // all joined users
                        device_list_changes.extend(
                            services()
                                .rooms
                                .state_cache
                                .room_members(room_id)
                                .flatten()
                                .filter(|user_id| {
                                    // Don't send key updates from the sender
                                    // to the sender
                                    &sender_user != user_id
                                })
                                .filter(|user_id| {
                                    // Only send keys if the sender doesn't
                                    // share an encrypted room with the target
                                    // already
                                    !share_encrypted_room(
                                        &sender_user,
                                        user_id,
                                        room_id,
                                    )
                                    .unwrap_or(false)
                                }),
                        );
                    }
                }
            }

            // Look for device list updates in this room
            device_list_changes.extend(
                services()
                    .users
                    .keys_changed(room_id.as_ref(), globalsince, None)
                    .filter_map(Result::ok),
            );
        }

        for user_id in left_encrypted_users {
            let dont_share_encrypted_room = services()
                .rooms
                .user
                .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])?
                .filter_map(Result::ok)
                .filter_map(|other_room_id| {
                    Some(
                        services()
                            .rooms
                            .state_accessor
                            .room_state_get(
                                &other_room_id,
                                &StateEventType::RoomEncryption,
                                "",
                            )
                            .ok()?
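                            // Whether this shared room is encrypted; rooms
                            // whose state lookup fails are skipped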
                            .is_some(),
                    )
                })
                .all(|encrypted| !encrypted);
            // If the user doesn't share an encrypted room with the target
            // anymore, we need to tell them
            if dont_share_encrypted_room {
                device_list_left.insert(user_id);
            }
        }
    }

    let mut lists = BTreeMap::new();
    // Rooms to fetch a timeline (and required state) for
    let mut todo_rooms: BTreeMap<OwnedRoomId, TodoRoom> = BTreeMap::new();

    for (list_id, list) in body.lists {
        // Invite lists are not handled here; skip them
        if list.filters.and_then(|f| f.is_invite).unwrap_or(false) {
            continue;
        }

        let mut new_known_rooms = BTreeSet::new();
        let mut ops = Vec::new();

        for (mut from, mut to) in list.ranges {
            // Without this guard, `len() - 1` would underflow and the slice
            // below would panic when the user has no joined rooms
            if all_joined_rooms.is_empty() {
                continue;
            }

            from = from.clamp(
                uint!(0),
                UInt::try_from(all_joined_rooms.len() - 1)
                    .unwrap_or(UInt::MAX),
            );
            to = to.clamp(
                from,
                UInt::try_from(all_joined_rooms.len() - 1)
                    .unwrap_or(UInt::MAX),
            );

            let room_ids = all_joined_rooms
                [from.try_into().unwrap_or(usize::MAX)
                    ..=to.try_into().unwrap_or(usize::MAX)]
                .to_vec();

            new_known_rooms.extend(room_ids.iter().cloned());
            for room_id in &room_ids {
                todo_rooms.entry(room_id.clone()).or_default().update(
                    list.room_details.required_state.clone(),
                    list.room_details.timeline_limit.unwrap_or(uint!(10)),
                    known_rooms.get(&list_id),
                    room_id,
                );
            }

            ops.push(sync_events::v4::SyncOp {
                op: SlidingOp::Sync,
                range: Some((from, to)),
                index: None,
                room_ids,
                room_id: None,
            });
        }

        lists.insert(
            list_id.clone(),
            sync_events::v4::SyncList {
                ops,
                count: UInt::try_from(all_joined_rooms.len())
                    .unwrap_or(UInt::MAX),
            },
        );

        if let Some(conn_id) = &body.conn_id {
            services().users.update_sync_known_rooms(
                sender_user.clone(),
                sender_device.clone(),
                conn_id.clone(),
                list_id,
                new_known_rooms,
                globalsince,
            );
        }
    }

    let mut known_subscription_rooms = BTreeSet::new();
    for (room_id, room) in &body.room_subscriptions {
        if !services().rooms.metadata.exists(room_id)? {
            continue;
        }
        todo_rooms.entry(room_id.clone()).or_default().update(
            room.required_state.clone(),
            room.timeline_limit.unwrap_or(uint!(10)),
            known_rooms.get("subscriptions"),
            room_id,
        );
        known_subscription_rooms.insert(room_id.clone());
    }

    for r in body.unsubscribe_rooms {
        known_subscription_rooms.remove(&r);
        body.room_subscriptions.remove(&r);
    }

    if let Some(conn_id) = &body.conn_id {
        services().users.update_sync_known_rooms(
            sender_user.clone(),
            sender_device.clone(),
            conn_id.clone(),
            "subscriptions".to_owned(),
            known_subscription_rooms,
            globalsince,
        );
    }

    if let Some(conn_id) = &body.conn_id {
        services().users.update_sync_subscriptions(
            sender_user.clone(),
            sender_device.clone(),
            conn_id.clone(),
            body.room_subscriptions,
        );
    }

    let mut rooms = BTreeMap::new();
    for (
        room_id,
        TodoRoom {
            required_state_request,
            timeline_limit,
            roomsince,
        },
    ) in &todo_rooms
    {
        let roomsincecount = PduCount::Normal(*roomsince);

        let (timeline_pdus, limited) = load_timeline(
            &sender_user,
            room_id,
            roomsincecount,
            *timeline_limit,
        )?;

        if roomsince != &0 && timeline_pdus.is_empty() {
            continue;
        }

        let prev_batch = timeline_pdus
            .first()
            .map(|(pdu_count, _)| match pdu_count {
                PduCount::Backfilled(_) => {
                    error!("Timeline in backfill state?!");
                    "0".to_owned()
                }
                PduCount::Normal(c) => c.to_string(),
            })
            .or_else(|| (roomsince != &0).then(|| roomsince.to_string()));

        let room_events: Vec<_> = timeline_pdus
            .iter()
            .map(|(_, pdu)| pdu.to_sync_room_event())
            .collect();

        let required_state = required_state_request
            .iter()
            .filter_map(|state| {
                services()
                    .rooms
                    .state_accessor
                    .room_state_get(room_id, &state.0, &state.1)
                    .ok()
                    .flatten()
                    .map(|state| state.to_sync_state_event())
            })
            .collect();

        // Heroes
        let heroes = services()
            .rooms
            .state_cache
            .room_members(room_id)
            .filter_map(Result::ok)
            .filter(|member| member != &sender_user)
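            // Resolve each hero to (displayname, avatar_url), falling back
            // to the bare user ID when no displayname is set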
            .filter_map(|member| {
                services()
                    .rooms
                    .state_accessor
                    .get_member(room_id, &member)
                    .ok()
                    .flatten()
                    .map(|memberevent| {
                        (
                            memberevent
                                .displayname
                                .unwrap_or_else(|| member.to_string()),
                            memberevent.avatar_url,
                        )
                    })
            })
            .take(5)
            .collect::<Vec<_>>();

        let name = match &*heroes {
            [] => None,
            [only] => Some(only.0.clone()),
            [firsts @ .., last] => Some({
                let firsts = firsts
                    .iter()
                    .map(|h| h.0.clone())
                    .collect::<Vec<_>>()
                    .join(", ");

                format!("{firsts} and {}", last.0)
            }),
        };

        // Only use a hero's avatar if they are the single hero
        let avatar = if let [only] = &*heroes {
            only.1.clone()
        } else {
            None
        };

        rooms.insert(
            room_id.clone(),
            sync_events::v4::SlidingSyncRoom {
                name: services()
                    .rooms
                    .state_accessor
                    .get_name(room_id)?
                    .or(name),
                avatar: if let Some(avatar) = avatar {
                    JsOption::Some(avatar)
                } else {
                    match services()
                        .rooms
                        .state_accessor
                        .get_avatar(room_id)?
                    {
                        JsOption::Some(avatar) => {
                            JsOption::from_option(avatar.url)
                        }
                        JsOption::Null => JsOption::Null,
                        JsOption::Undefined => JsOption::Undefined,
                    }
                },
                initial: Some(roomsince == &0),
                is_dm: None,
                invite_state: None,
                unread_notifications: UnreadNotificationsCount {
                    highlight_count: Some(
                        services()
                            .rooms
                            .user
                            .highlight_count(&sender_user, room_id)?
                            .try_into()
                            .expect("notification count can't go that high"),
                    ),
                    notification_count: Some(
                        services()
                            .rooms
                            .user
                            .notification_count(&sender_user, room_id)?
                            .try_into()
                            .expect("notification count can't go that high"),
                    ),
                },
                timeline: room_events,
                required_state,
                prev_batch,
                limited,
                joined_count: Some(
                    services()
                        .rooms
                        .state_cache
                        .room_joined_count(room_id)?
                        .map(UInt::new_saturating)
                        .unwrap_or(uint!(0)),
                ),
                invited_count: Some(
                    services()
                        .rooms
                        .state_cache
                        .room_invited_count(room_id)?
                        .map(UInt::new_saturating)
                        .unwrap_or(uint!(0)),
                ),
                // Count events in timeline greater than global sync counter
                num_live: None,
                timestamp: None,
                // TODO
                heroes: None,
            },
        );
    }

    if rooms
        .iter()
        .all(|(_, r)| r.timeline.is_empty() && r.required_state.is_empty())
    {
        // Hang a few seconds so requests are not spammed
        // Stop hanging if new info arrives
        let mut duration = body.timeout.unwrap_or(Duration::from_secs(30));
        if duration.as_secs() > 30 {
            duration = Duration::from_secs(30);
        }
        match tokio::time::timeout(duration, watcher).await {
            Ok(x) => x.expect("watcher should succeed"),
            Err(error) => debug!(%error, "Timed out"),
        };
    }

    Ok(Ra(sync_events::v4::Response {
        initial: globalsince == 0,
        txn_id: body.txn_id.clone(),
        pos: next_batch.to_string(),
        lists,
        rooms,
        extensions: sync_events::v4::Extensions {
            to_device: body
                .extensions
                .to_device
                .enabled
                .unwrap_or(false)
                .then(|| {
                    services()
                        .users
                        .get_to_device_events(&sender_user, &sender_device)
                        .map(|events| sync_events::v4::ToDevice {
                            events,
                            next_batch: next_batch.to_string(),
                        })
                })
                .transpose()?,
            e2ee: sync_events::v4::E2EE {
                device_lists: DeviceLists {
                    changed: device_list_changes.into_iter().collect(),
                    left: device_list_left.into_iter().collect(),
                },
                device_one_time_keys_count: services()
                    .users
                    .count_one_time_keys(&sender_user, &sender_device)?,
                // Fallback keys are not yet supported
                device_unused_fallback_key_types: None,
            },
            account_data: sync_events::v4::AccountData {
                global: if body
                    .extensions
                    .account_data
                    .enabled
                    .unwrap_or(false)
                {
                    services()
                        .account_data
                        .global_changes_since(&sender_user, globalsince)?
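                        // Re-wrap each changed (event type, content) pair as
                        // a raw global account-data event for the response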
                        .into_iter()
                        .map(|(event_type, content)| {
                            account_data::raw_global_event_from_parts(
                                &event_type,
                                &content,
                            )
                        })
                        .collect()
                } else {
                    Vec::new()
                },
                rooms: BTreeMap::new(),
            },
            receipts: sync_events::v4::Receipts {
                rooms: BTreeMap::new(),
            },
            typing: sync_events::v4::Typing {
                rooms: BTreeMap::new(),
            },
        },
        delta_token: None,
    }))
}