stop putting comments and code on the same line

Charles Hall 2024-05-15 15:40:56 -07:00
parent 0915aba44c
commit 1911ad34d9
35 changed files with 305 additions and 142 deletions
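The change is mechanical and identical in every file: a comment that trailed code on the same line moves onto its own line directly above that code. As a minimal illustration, taken verbatim from the first hunk below, before:

    type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries

and after:

    // Time if last failed try, number of failed tries
    type RateLimitState = (Instant, u32);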

@@ -43,16 +43,20 @@ use base64::{engine::general_purpose, Engine as _};
type WellKnownMap = HashMap<OwnedServerName, (FedDest, String)>;
type TlsNameMap = HashMap<String, (Vec<IpAddr>, u16)>;
type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries
// Time if last failed try, number of failed tries
type RateLimitState = (Instant, u32);
type SyncHandle = (
Option<String>, // since
Receiver<Option<Result<sync_events::v3::Response>>>, // rx
// since
Option<String>,
// rx
Receiver<Option<Result<sync_events::v3::Response>>>,
);
pub(crate) struct Service {
pub(crate) db: &'static dyn Data,
pub(crate) actual_destination_cache: Arc<RwLock<WellKnownMap>>, // actual_destination, host
// actual_destination, host
pub(crate) actual_destination_cache: Arc<RwLock<WellKnownMap>>,
pub(crate) tls_name_override: Arc<StdRwLock<TlsNameMap>>,
pub(crate) config: Config,
keypair: Arc<ruma::signatures::Ed25519KeyPair>,
@@ -69,7 +73,9 @@ pub(crate) struct Service {
pub(crate) sync_receivers: RwLock<HashMap<(OwnedUserId, OwnedDeviceId), SyncHandle>>,
pub(crate) roomid_mutex_insert: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>,
pub(crate) roomid_mutex_state: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>,
pub(crate) roomid_mutex_federation: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>, // this lock will be held longer
// this lock will be held longer
pub(crate) roomid_mutex_federation: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>,
pub(crate) roomid_federationhandletime: RwLock<HashMap<OwnedRoomId, (OwnedEventId, Instant)>>,
pub(crate) stateres_mutex: Arc<Mutex<()>>,
pub(crate) rotate: RotationHandler,

@@ -113,8 +113,9 @@ impl Service {
width: u32,
height: u32,
) -> Result<Option<FileMeta>> {
// 0, 0 because that's the original file
let (width, height, crop) =
Self::thumbnail_properties(width, height).unwrap_or((0, 0, false)); // 0, 0 because that's the original file
Self::thumbnail_properties(width, height).unwrap_or((0, 0, false));
if let Ok((content_disposition, content_type, key)) =
self.db.search_file_metadata(mxc.clone(), width, height)

@@ -93,10 +93,11 @@ impl Service {
.expect("http::response::Builder is usable"),
);
// TODO: handle timeout
let body = response.bytes().await.unwrap_or_else(|e| {
warn!("server error {}", e);
Vec::new().into()
}); // TODO: handle timeout
});
if status != 200 {
info!(
@@ -201,7 +202,8 @@ impl Service {
let ctx = PushConditionRoomCtx {
room_id: room_id.to_owned(),
member_count: 10_u32.into(), // TODO: get member count efficiently
// TODO: get member count efficiently
member_count: 10_u32.into(),
user_id: user.to_owned(),
user_display_name: services()
.users

@@ -6,8 +6,10 @@ use tracing::trace;
use crate::{services, utils, Result};
pub(crate) struct Service {
pub(crate) typing: RwLock<BTreeMap<OwnedRoomId, BTreeMap<OwnedUserId, u64>>>, // u64 is unix timestamp of timeout
pub(crate) last_typing_update: RwLock<BTreeMap<OwnedRoomId, u64>>, // timestamp of the last change to typing users
// u64 is unix timestamp of timeout
pub(crate) typing: RwLock<BTreeMap<OwnedRoomId, BTreeMap<OwnedUserId, u64>>>,
// timestamp of the last change to typing users
pub(crate) last_typing_update: RwLock<BTreeMap<OwnedRoomId, u64>>,
pub(crate) typing_update_sender: broadcast::Sender<OwnedRoomId>,
}

@@ -446,7 +446,8 @@ impl Service {
if !state_res::event_auth::auth_check(
&room_version,
&incoming_pdu,
None::<PduEvent>, // TODO: third party invite
// TODO: third party invite
None::<PduEvent>,
|k, s| auth_events.get(&(k.to_string().into(), s.to_owned())),
)
.map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed"))?
@@ -748,7 +749,8 @@ impl Service {
let check_result = state_res::event_auth::auth_check(
&room_version,
&incoming_pdu,
None::<PduEvent>, // TODO: third party invite
// TODO: third party invite
None::<PduEvent>,
|k, s| {
services()
.rooms

@@ -64,7 +64,8 @@ impl Service {
let events_after: Vec<_> = services()
.rooms
.pdu_metadata
.relations_until(sender_user, room_id, target, from)? // TODO: should be relations_after
// TODO: should be relations_after
.relations_until(sender_user, room_id, target, from)?
.filter(|r| {
r.as_ref().map_or(true, |(_, pdu)| {
filter_event_type.as_ref().map_or(true, |t| &&pdu.kind == t)
@@ -90,14 +91,16 @@ impl Service {
.user_can_see_event(sender_user, room_id, &pdu.event_id)
.unwrap_or(false)
})
.take_while(|&(k, _)| Some(k) != to) // Stop at `to`
// Stop at `to`
.take_while(|&(k, _)| Some(k) != to)
.collect();
next_token = events_after.last().map(|(count, _)| count).copied();
let events_after: Vec<_> = events_after
.into_iter()
.rev() // relations are always most recent first
// relations are always most recent first
.rev()
.map(|(_, pdu)| pdu.to_message_like_event())
.collect();
@@ -137,7 +140,8 @@ impl Service {
.user_can_see_event(sender_user, room_id, &pdu.event_id)
.unwrap_or(false)
})
.take_while(|&(k, _)| Some(k) != to) // Stop at `to`
// Stop at `to`
.take_while(|&(k, _)| Some(k) != to)
.collect();
next_token = events_before.last().map(|(count, _)| count).copied();
@@ -167,7 +171,8 @@ impl Service {
let target = match services().rooms.timeline.get_pdu_count(target)? {
Some(PduCount::Normal(c)) => c,
// TODO: Support backfilled relations
_ => 0, // This will result in an empty iterator
// This will result in an empty iterator
_ => 0,
};
self.db.relations_until(user_id, room_id, target, until)
}

@@ -35,7 +35,8 @@ impl Service {
shortstatehash: u64,
statediffnew: Arc<HashSet<CompressedStateEvent>>,
_statediffremoved: Arc<HashSet<CompressedStateEvent>>,
state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
// Take mutex guard to make sure users get the room state mutex
state_lock: &MutexGuard<'_, ()>,
) -> Result<()> {
for event_id in statediffnew.iter().filter_map(|new| {
services()
@@ -169,7 +170,8 @@ impl Service {
shortstatehash,
statediffnew,
statediffremoved,
1_000_000, // high number because no state will be based on this one
// high number because no state will be based on this one
1_000_000,
states_parents,
)?;
}
@@ -315,7 +317,8 @@ impl Service {
&self,
room_id: &RoomId,
shortstatehash: u64,
mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
// Take mutex guard to make sure users get the room state mutex
mutex_lock: &MutexGuard<'_, ()>,
) -> Result<()> {
self.db.set_room_state(room_id, shortstatehash, mutex_lock)
}
@@ -358,7 +361,8 @@ impl Service {
&self,
room_id: &RoomId,
event_ids: Vec<OwnedEventId>,
state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
// Take mutex guard to make sure users get the room state mutex
state_lock: &MutexGuard<'_, ()>,
) -> Result<()> {
self.db
.set_forward_extremities(room_id, event_ids, state_lock)

@@ -12,7 +12,8 @@ pub(crate) trait Data: Send + Sync {
&self,
room_id: &RoomId,
new_shortstatehash: u64,
_mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
// Take mutex guard to make sure users get the room state mutex
_mutex_lock: &MutexGuard<'_, ()>,
) -> Result<()>;
/// Associates a state with an event.
@@ -26,6 +27,7 @@ pub(crate) trait Data: Send + Sync {
&self,
room_id: &RoomId,
event_ids: Vec<OwnedEventId>,
_mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
// Take mutex guard to make sure users get the room state mutex
_mutex_lock: &MutexGuard<'_, ()>,
) -> Result<()>;
}

@@ -151,8 +151,10 @@ impl Service {
let is_ignored = services()
.account_data
.get(
None, // Ignored users are in global account data
user_id, // Receiver
// Ignored users are in global account data
None,
// Receiver
user_id,
GlobalAccountDataEventType::IgnoredUserList
.to_string()
.into(),

@@ -21,10 +21,14 @@ pub(crate) struct Service {
LruCache<
u64,
Vec<(
u64, // shortstatehash
Arc<HashSet<CompressedStateEvent>>, // full state
Arc<HashSet<CompressedStateEvent>>, // added
Arc<HashSet<CompressedStateEvent>>, // removed
// shortstatehash
u64,
// full state
Arc<HashSet<CompressedStateEvent>>,
// added
Arc<HashSet<CompressedStateEvent>>,
// removed
Arc<HashSet<CompressedStateEvent>>,
)>,
>,
>,
@@ -41,10 +45,14 @@ impl Service {
shortstatehash: u64,
) -> Result<
Vec<(
u64, // shortstatehash
Arc<HashSet<CompressedStateEvent>>, // full state
Arc<HashSet<CompressedStateEvent>>, // added
Arc<HashSet<CompressedStateEvent>>, // removed
// shortstatehash
u64,
// full state
Arc<HashSet<CompressedStateEvent>>,
// added
Arc<HashSet<CompressedStateEvent>>,
// removed
Arc<HashSet<CompressedStateEvent>>,
)>,
> {
if let Some(r) = self
@@ -152,10 +160,14 @@ impl Service {
statediffremoved: Arc<HashSet<CompressedStateEvent>>,
diff_to_sibling: usize,
mut parent_states: Vec<(
u64, // shortstatehash
Arc<HashSet<CompressedStateEvent>>, // full state
Arc<HashSet<CompressedStateEvent>>, // added
Arc<HashSet<CompressedStateEvent>>, // removed
// shortstatehash
u64,
// full state
Arc<HashSet<CompressedStateEvent>>,
// added
Arc<HashSet<CompressedStateEvent>>,
// removed
Arc<HashSet<CompressedStateEvent>>,
)>,
) -> Result<()> {
let diffsum = statediffnew.len() + statediffremoved.len();
@@ -318,7 +330,8 @@ impl Service {
new_shortstatehash,
statediffnew.clone(),
statediffremoved.clone(),
2, // every state change is 2 event changes on average
// every state change is 2 event changes on average
2,
states_parents,
)?;
};

@@ -177,7 +177,8 @@ impl Service {
pdu: &PduEvent,
mut pdu_json: CanonicalJsonObject,
leaves: Vec<OwnedEventId>,
state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
// Take mutex guard to make sure users get the room state mutex
state_lock: &MutexGuard<'_, ()>,
) -> Result<Vec<u8>> {
let shortroomid = services()
.rooms
@@ -527,7 +528,8 @@ impl Service {
.threads
.add_to_thread(&thread.event_id, pdu)?;
}
_ => {} // TODO: Aggregate other types
// TODO: Aggregate other types
_ => {}
}
}
@@ -598,7 +600,8 @@ impl Service {
pdu_builder: PduBuilder,
sender: &UserId,
room_id: &RoomId,
_mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
// Take mutex guard to make sure users get the room state mutex
_mutex_lock: &MutexGuard<'_, ()>,
) -> Result<(PduEvent, CanonicalJsonObject)> {
let PduBuilder {
event_type,
@@ -702,7 +705,8 @@ impl Service {
let auth_check = state_res::auth_check(
&room_version,
&pdu,
None::<PduEvent>, // TODO: third_party_invite
// TODO: third_party_invite
None::<PduEvent>,
|k, s| auth_events.get(&(k.clone(), s.to_owned())),
)
.map_err(|e| {
@@ -781,7 +785,8 @@ impl Service {
pdu_builder: PduBuilder,
sender: &UserId,
room_id: &RoomId,
state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
// Take mutex guard to make sure users get the room state mutex
state_lock: &MutexGuard<'_, ()>,
) -> Result<Arc<EventId>> {
let (pdu, pdu_json) =
self.create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock)?;
@@ -990,7 +995,8 @@ impl Service {
new_room_leaves: Vec<OwnedEventId>,
state_ids_compressed: Arc<HashSet<CompressedStateEvent>>,
soft_fail: bool,
state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
// Take mutex guard to make sure users get the room state mutex
state_lock: &MutexGuard<'_, ()>,
) -> Result<Option<Vec<u8>>> {
// We append to state before appending the pdu, so we don't have a moment in time with the
// pdu without it's state. This is okay because append_pdu can't fail.

@@ -47,7 +47,8 @@ use tracing::{debug, error, warn};
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub(crate) enum OutgoingKind {
Appservice(String),
Push(OwnedUserId, String), // user and pushkey
// user and pushkey
Push(OwnedUserId, String),
Normal(OwnedServerName),
}
@@ -81,8 +82,10 @@ impl OutgoingKind {
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub(crate) enum SendingEventType {
Pdu(Vec<u8>), // pduid
Edu(Vec<u8>), // pdu json
// pduid
Pdu(Vec<u8>),
// pdu json
Edu(Vec<u8>),
}
pub(crate) struct Service {
@@ -96,8 +99,10 @@ pub(crate) struct Service {
enum TransactionStatus {
Running,
Failed(u32, Instant), // number of times failed, time of last failure
Retrying(u32), // number of times failed
// number of times failed, time of last failure
Failed(u32, Instant),
// number of times failed
Retrying(u32),
}
impl Service {
@@ -203,7 +208,8 @@ impl Service {
fn select_events(
&self,
outgoing_kind: &OutgoingKind,
new_events: Vec<(SendingEventType, Vec<u8>)>, // Events we want to send: event and full key
// Events we want to send: event and full key
new_events: Vec<(SendingEventType, Vec<u8>)>,
current_transaction_status: &mut HashMap<OutgoingKind, TransactionStatus>,
) -> Result<Option<Vec<SendingEventType>>> {
let mut retry = false;
@@ -214,7 +220,8 @@ impl Service {
entry
.and_modify(|e| match e {
TransactionStatus::Running | TransactionStatus::Retrying(_) => {
allow = false; // already running
// already running
allow = false;
}
TransactionStatus::Failed(tries, time) => {
// Fail if a request has failed recently (exponential backoff)
@@ -444,7 +451,6 @@ impl Service {
/// Cleanup event data
/// Used for instance after we remove an appservice registration
///
#[tracing::instrument(skip(self))]
pub(crate) fn cleanup_events(&self, appservice_id: String) -> Result<()> {
self.db
@@ -543,9 +549,8 @@ impl Service {
})?,
);
}
SendingEventType::Edu(_) => {
// Push gateways don't need EDUs (?)
}
// Push gateways don't need EDUs (?)
SendingEventType::Edu(_) => {}
}
}

@@ -29,7 +29,8 @@ impl Service {
self.db.set_uiaa_request(
user_id,
device_id,
uiaainfo.session.as_ref().expect("session should be set"), // TODO: better session error handling (why is it optional in ruma?)
// TODO: better session error handling (why is it optional in ruma?)
uiaainfo.session.as_ref().expect("session should be set"),
json_body,
)?;
self.db.update_uiaa_session(

@@ -28,7 +28,8 @@ use crate::{services, Error, Result};
pub(crate) struct SlidingSyncCache {
lists: BTreeMap<String, SyncRequestList>,
subscriptions: BTreeMap<OwnedRoomId, sync_events::v4::RoomSubscription>,
known_rooms: BTreeMap<String, BTreeMap<OwnedRoomId, u64>>, // For every room, the roomsince number
// For every room, the roomsince number
known_rooms: BTreeMap<String, BTreeMap<OwnedRoomId, u64>>,
extensions: ExtensionsConfig,
}