mirror of https://gitlab.computer.surgery/matrix/grapevine.git (synced 2025-12-17 07:41:23 +01:00)
enable as_conversions lint
There were some very, uh, creative (and inconsistent) ways to convert between numeric types in here...
parent a78bf8f50b
commit 71c48f66c4
21 changed files with 195 additions and 91 deletions
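
A minimal sketch (not part of this commit) of the trade-off the lint enforces: `as` silently truncates and reinterprets, while `TryFrom`-based conversions make the failure mode an explicit choice — panic with a documented invariant, or saturate at the type's bounds.

    fn main() {
        let big: u64 = 5_000_000_000;

        // `as` wraps silently: 5_000_000_000 as u32 == 705_032_704.
        #[allow(clippy::as_conversions, clippy::cast_possible_truncation)]
        let lossy = big as u32;

        // Checked conversion: panic, with the invariant spelled out...
        let checked = u32::try_from(123_u64).expect("123 should fit in u32");

        // ...or saturate when an upper bound is an acceptable answer.
        let saturated = u32::try_from(big).unwrap_or(u32::MAX);

        println!("{lossy} {checked} {saturated}");
    }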
@@ -17,6 +17,7 @@ unused_qualifications = "warn"
 
 # Keep alphabetically sorted
 [workspace.lints.clippy]
+as_conversions = "warn"
 assertions_on_result_states = "warn"
 cloned_instead_of_copied = "warn"
 dbg_macro = "warn"
@@ -56,7 +56,11 @@ pub(crate) async fn get_latest_backup_info_route(
     Ok(get_latest_backup_info::v3::Response {
         algorithm,
-        count: (services().key_backups.count_keys(sender_user, &version)? as u32).into(),
+        count: services()
+            .key_backups
+            .count_keys(sender_user, &version)?
+            .try_into()
+            .expect("count should fit in UInt"),
         etag: services().key_backups.get_etag(sender_user, &version)?,
         version,
     })

@@ -79,10 +83,11 @@ pub(crate) async fn get_backup_info_route(
     Ok(get_backup_info::v3::Response {
         algorithm,
-        count: (services()
+        count: services()
             .key_backups
-            .count_keys(sender_user, &body.version)? as u32)
-            .into(),
+            .count_keys(sender_user, &body.version)?
+            .try_into()
+            .expect("count should fit in UInt"),
         etag: services()
             .key_backups
             .get_etag(sender_user, &body.version)?,

@@ -144,10 +149,11 @@ pub(crate) async fn add_backup_keys_route(
     }
 
     Ok(add_backup_keys::v3::Response {
-        count: (services()
+        count: services()
             .key_backups
-            .count_keys(sender_user, &body.version)? as u32)
-            .into(),
+            .count_keys(sender_user, &body.version)?
+            .try_into()
+            .expect("count should fit in UInt"),
         etag: services()
             .key_backups
             .get_etag(sender_user, &body.version)?,

@@ -189,10 +195,11 @@ pub(crate) async fn add_backup_keys_for_room_route(
     }
 
     Ok(add_backup_keys_for_room::v3::Response {
-        count: (services()
+        count: services()
             .key_backups
-            .count_keys(sender_user, &body.version)? as u32)
-            .into(),
+            .count_keys(sender_user, &body.version)?
+            .try_into()
+            .expect("count should fit in UInt"),
         etag: services()
             .key_backups
             .get_etag(sender_user, &body.version)?,

@@ -232,10 +239,11 @@ pub(crate) async fn add_backup_keys_for_session_route(
     )?;
 
     Ok(add_backup_keys_for_session::v3::Response {
-        count: (services()
+        count: services()
             .key_backups
-            .count_keys(sender_user, &body.version)? as u32)
-            .into(),
+            .count_keys(sender_user, &body.version)?
+            .try_into()
+            .expect("count should fit in UInt"),
         etag: services()
             .key_backups
             .get_etag(sender_user, &body.version)?,

@@ -302,10 +310,11 @@ pub(crate) async fn delete_backup_keys_route(
         .delete_all_keys(sender_user, &body.version)?;
 
     Ok(delete_backup_keys::v3::Response {
-        count: (services()
+        count: services()
             .key_backups
-            .count_keys(sender_user, &body.version)? as u32)
-            .into(),
+            .count_keys(sender_user, &body.version)?
+            .try_into()
+            .expect("count should fit in UInt"),
         etag: services()
             .key_backups
             .get_etag(sender_user, &body.version)?,

@@ -325,10 +334,11 @@ pub(crate) async fn delete_backup_keys_for_room_route(
         .delete_room_keys(sender_user, &body.version, &body.room_id)?;
 
     Ok(delete_backup_keys_for_room::v3::Response {
-        count: (services()
+        count: services()
             .key_backups
-            .count_keys(sender_user, &body.version)? as u32)
-            .into(),
+            .count_keys(sender_user, &body.version)?
+            .try_into()
+            .expect("count should fit in UInt"),
         etag: services()
             .key_backups
             .get_etag(sender_user, &body.version)?,

@@ -351,10 +361,11 @@ pub(crate) async fn delete_backup_keys_for_session_route(
     )?;
 
     Ok(delete_backup_keys_for_session::v3::Response {
-        count: (services()
+        count: services()
             .key_backups
-            .count_keys(sender_user, &body.version)? as u32)
-            .into(),
+            .count_keys(sender_user, &body.version)?
+            .try_into()
+            .expect("count should fit in UInt"),
         etag: services()
             .key_backups
             .get_etag(sender_user, &body.version)?,
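
All eight backup endpoints now share the same shape: the key count comes back as a plain Rust integer and the response wants ruma's UInt, which is capped at 2^53 - 1 (JavaScript's MAX_SAFE_INTEGER), so the conversion can only fail for absurdly large counts — hence `expect` with the invariant named. A standalone sketch, assuming the js_int crate that backs ruma's UInt:

    use js_int::UInt;

    // Hypothetical standalone version of the conversion in the hunks above.
    fn count_to_uint(count: usize) -> UInt {
        // Only fails for counts above 2^53 - 1.
        count.try_into().expect("count should fit in UInt")
    }

    fn main() {
        assert_eq!(count_to_uint(7), UInt::from(7_u32));
    }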
@@ -2,6 +2,7 @@ use crate::{services, Error, Result, Ruma};
 use ruma::{
     api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions},
     events::StateEventType,
+    uint,
 };
 use std::collections::HashSet;
 use tracing::error;

@@ -70,7 +71,8 @@ pub(crate) async fn get_context_route(
     }
 
     // Use limit with maximum 100
-    let limit = u64::from(body.limit).min(100) as usize;
+    let half_limit =
+        usize::try_from(body.limit.min(uint!(100)) / uint!(2)).expect("0-50 should fit in usize");
 
     let base_event = base_event.to_room_event();

@@ -78,7 +80,7 @@ pub(crate) async fn get_context_route(
         .rooms
         .timeline
         .pdus_until(sender_user, &room_id, base_token)?
-        .take(limit / 2)
+        .take(half_limit)
         .filter_map(|r| r.ok()) // Remove buggy events
         .filter(|(_, pdu)| {
             services()

@@ -115,7 +117,7 @@ pub(crate) async fn get_context_route(
         .rooms
         .timeline
         .pdus_after(sender_user, &room_id, base_token)?
-        .take(limit / 2)
+        .take(half_limit)
        .filter_map(|r| r.ok()) // Remove buggy events
         .filter(|(_, pdu)| {
             services()
@@ -24,7 +24,7 @@ use ruma::{
         },
         StateEventType,
     },
-    ServerName, UInt,
+    uint, ServerName, UInt,
 };
 use tracing::{error, info, warn};

@@ -157,8 +157,8 @@ pub(crate) async fn get_public_rooms_filtered_helper(
         });
     }
 
-    let limit = limit.map_or(10, u64::from);
-    let mut num_since = 0_u64;
+    let limit = limit.unwrap_or(uint!(10));
+    let mut num_since = UInt::MIN;
 
     if let Some(s) = &since {
         let mut characters = s.chars();

@@ -340,21 +340,21 @@ pub(crate) async fn get_public_rooms_filtered_helper(
     all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members));
 
-    let total_room_count_estimate = (all_rooms.len() as u32).into();
+    let total_room_count_estimate = all_rooms.len().try_into().unwrap_or(UInt::MAX);
 
     let chunk: Vec<_> = all_rooms
         .into_iter()
-        .skip(num_since as usize)
-        .take(limit as usize)
+        .skip(num_since.try_into().expect("UInt should fit in usize"))
+        .take(limit.try_into().expect("UInt should fit in usize"))
         .collect();
 
-    let prev_batch = if num_since == 0 {
+    let prev_batch = if num_since == uint!(0) {
         None
     } else {
         Some(format!("p{num_since}"))
     };
 
-    let next_batch = if chunk.len() < limit as usize {
+    let next_batch = if chunk.len() < limit.try_into().expect("UInt should fit in usize") {
         None
     } else {
         Some(format!("n{}", num_since + limit))
@@ -8,6 +8,7 @@ use ruma::{
         message::{get_message_events, send_message_event},
     },
     events::{StateEventType, TimelineEventType},
+    uint,
 };
 use std::{
     collections::{BTreeMap, HashSet},

@@ -136,7 +137,11 @@ pub(crate) async fn get_message_events_route(
         .lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from)
         .await?;
 
-    let limit = u64::from(body.limit).min(100) as usize;
+    let limit = body
+        .limit
+        .min(uint!(100))
+        .try_into()
+        .expect("0-100 should fit in usize");
 
     let next_token;
@@ -1,6 +1,9 @@
-use ruma::api::client::relations::{
-    get_relating_events, get_relating_events_with_rel_type,
-    get_relating_events_with_rel_type_and_event_type,
+use ruma::{
+    api::client::relations::{
+        get_relating_events, get_relating_events_with_rel_type,
+        get_relating_events_with_rel_type_and_event_type,
+    },
+    uint,
 };
 
 use crate::{service::rooms::timeline::PduCount, services, Result, Ruma};

@@ -28,9 +31,10 @@ pub(crate) async fn get_relating_events_with_rel_type_and_event_type_route(
     // Use limit or else 10, with maximum 100
     let limit = body
         .limit
-        .and_then(|u| u32::try_from(u).ok())
-        .map_or(10_usize, |u| u as usize)
-        .min(100);
+        .map(|x| x.min(uint!(100)))
+        .unwrap_or(uint!(10))
+        .try_into()
+        .expect("0-100 should fit in usize");
 
     let res = services()
         .rooms

@@ -78,9 +82,10 @@ pub(crate) async fn get_relating_events_with_rel_type_route(
     // Use limit or else 10, with maximum 100
     let limit = body
         .limit
-        .and_then(|u| u32::try_from(u).ok())
-        .map_or(10_usize, |u| u as usize)
-        .min(100);
+        .map(|x| x.min(uint!(100)))
+        .unwrap_or(uint!(10))
+        .try_into()
+        .expect("0-100 should fit in usize");
 
     let res = services()
         .rooms

@@ -126,9 +131,10 @@ pub(crate) async fn get_relating_events_route(
     // Use limit or else 10, with maximum 100
     let limit = body
         .limit
-        .and_then(|u| u32::try_from(u).ok())
-        .map_or(10_usize, |u| u as usize)
-        .min(100);
+        .map(|x| x.min(uint!(100)))
+        .unwrap_or(uint!(10))
+        .try_into()
+        .expect("0-100 should fit in usize");
 
     services()
         .rooms
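
The same clamp chain now appears verbatim in the relations, search, and space endpoints; a hypothetical helper (not in this commit) showing the shape they share, again assuming js_int:

    use js_int::{uint, UInt};

    // Hypothetical helper: "limit or else 10, with maximum 100" as one function.
    fn effective_limit(requested: Option<UInt>) -> usize {
        requested
            .map(|x| x.min(uint!(100)))
            .unwrap_or(uint!(10))
            .try_into()
            .expect("0-100 should fit in usize")
    }

    fn main() {
        assert_eq!(effective_limit(None), 10);
        assert_eq!(effective_limit(Some(uint!(25))), 25);
        assert_eq!(effective_limit(Some(uint!(1000))), 100);
    }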
@@ -1,10 +1,13 @@
 use crate::{services, Error, Result, Ruma};
-use ruma::api::client::{
-    error::ErrorKind,
-    search::search_events::{
-        self,
-        v3::{EventContextResult, ResultCategories, ResultRoomEvents, SearchResult},
+use ruma::{
+    api::client::{
+        error::ErrorKind,
+        search::search_events::{
+            self,
+            v3::{EventContextResult, ResultCategories, ResultRoomEvents, SearchResult},
+        },
     },
+    uint,
 };
 
 use std::collections::BTreeMap;

@@ -32,7 +35,12 @@ pub(crate) async fn search_events_route(
     });
 
     // Use limit or else 10, with maximum 100
-    let limit = filter.limit.map_or(10, u64::from).min(100) as usize;
+    let limit = filter
+        .limit
+        .map(|x| x.min(uint!(100)))
+        .unwrap_or(uint!(10))
+        .try_into()
+        .expect("0-100 should fit in usize");
 
     let mut searches = Vec::new();

@@ -123,8 +131,8 @@ pub(crate) async fn search_events_route(
 
     Ok(search_events::v3::Response::new(ResultCategories {
         room_events: ResultRoomEvents {
-            count: Some((results.len() as u32).into()), // TODO: set this to none. Element shouldn't depend on it
+            count: None,
             groups: BTreeMap::new(), // TODO
             next_batch,
             results,
             state: BTreeMap::new(), // TODO
@@ -1,5 +1,5 @@
 use crate::{services, Result, Ruma};
-use ruma::api::client::space::get_hierarchy;
+use ruma::{api::client::space::get_hierarchy, uint};
 
 /// # `GET /_matrix/client/v1/rooms/{room_id}/hierarchy``
 ///

@@ -15,9 +15,17 @@ pub(crate) async fn get_hierarchy_route(
         .and_then(|s| s.parse::<usize>().ok())
         .unwrap_or(0);
 
-    let limit = body.limit.map_or(10, u64::from).min(100) as usize;
+    let limit = body
+        .limit
+        .map(|x| x.min(uint!(100)))
+        .unwrap_or(uint!(10))
+        .try_into()
+        .expect("0-100 should fit in usize");
 
-    let max_depth = body.max_depth.map_or(3, u64::from).min(10) as usize + 1; // +1 to skip the space room itself
+    let max_depth = usize::try_from(body.max_depth.map(|x| x.min(uint!(10))).unwrap_or(uint!(3)))
+        .expect("0-10 should fit in usize")
+        // Skip the space room itself
+        + 1;
 
     services()
         .rooms
@@ -1087,8 +1087,8 @@ async fn load_joined_room(
         },
         summary: RoomSummary {
             heroes,
-            joined_member_count: joined_member_count.map(|n| (n as u32).into()),
-            invited_member_count: invited_member_count.map(|n| (n as u32).into()),
+            joined_member_count: joined_member_count.map(UInt::new_saturating),
+            invited_member_count: invited_member_count.map(UInt::new_saturating),
         },
         unread_notifications: UnreadNotificationsCount {
             highlight_count,

@@ -1140,7 +1140,7 @@ fn load_timeline(
         // Take the last events for the timeline
         timeline_pdus = non_timeline_pdus
             .by_ref()
-            .take(limit as usize)
+            .take(limit.try_into().expect("limit should fit in usize"))
             .collect::<Vec<_>>()
             .into_iter()
             .rev()

@@ -1427,12 +1427,16 @@ pub(crate) async fn sync_events_v4_route(
                 .ranges
                 .into_iter()
                 .map(|mut r| {
-                    r.0 =
-                        r.0.clamp(uint!(0), UInt::from(all_joined_rooms.len() as u32 - 1));
-                    r.1 =
-                        r.1.clamp(r.0, UInt::from(all_joined_rooms.len() as u32 - 1));
-                    let room_ids = all_joined_rooms
-                        [(u64::from(r.0) as usize)..=(u64::from(r.1) as usize)]
+                    r.0 = r.0.clamp(
+                        uint!(0),
+                        UInt::try_from(all_joined_rooms.len() - 1).unwrap_or(UInt::MAX),
+                    );
+                    r.1 = r.1.clamp(
+                        r.0,
+                        UInt::try_from(all_joined_rooms.len() - 1).unwrap_or(UInt::MAX),
+                    );
+                    let room_ids = all_joined_rooms[r.0.try_into().unwrap_or(usize::MAX)
+                        ..=r.1.try_into().unwrap_or(usize::MAX)]
                         .to_vec();
                     new_known_rooms.extend(room_ids.iter().cloned());
                     for room_id in &room_ids {

@@ -1468,7 +1472,7 @@ pub(crate) async fn sync_events_v4_route(
                     }
                 })
                 .collect(),
-            count: UInt::from(all_joined_rooms.len() as u32),
+            count: UInt::try_from(all_joined_rooms.len()).unwrap_or(UInt::MAX),
         },
     );

@@ -1663,20 +1667,20 @@ pub(crate) async fn sync_events_v4_route(
                 prev_batch,
                 limited,
                 joined_count: Some(
-                    (services()
+                    services()
                         .rooms
                         .state_cache
                         .room_joined_count(room_id)?
-                        .unwrap_or(0) as u32)
-                        .into(),
+                        .map(UInt::new_saturating)
+                        .unwrap_or(uint!(0)),
                 ),
                 invited_count: Some(
-                    (services()
+                    services()
                         .rooms
                         .state_cache
                         .room_invited_count(room_id)?
-                        .unwrap_or(0) as u32)
-                        .into(),
+                        .map(UInt::new_saturating)
+                        .unwrap_or(uint!(0)),
                 ),
                 num_live: None, // Count events in timeline greater than global sync counter
                 timestamp: None,
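
Member counts come out of the database as u64, and the old path squeezed them through u32 before widening again. `UInt::new_saturating` clamps instead of truncating; a sketch of its semantics, assuming js_int:

    use js_int::UInt;

    fn main() {
        // In-range values pass through unchanged...
        assert_eq!(UInt::new_saturating(42), UInt::from(42_u32));
        // ...while anything above 2^53 - 1 clamps to UInt::MAX instead of wrapping.
        assert_eq!(UInt::new_saturating(u64::MAX), UInt::MAX);
    }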
@@ -30,7 +30,8 @@ pub(crate) async fn create_typing_event_route(
             .typing_add(
                 sender_user,
                 &body.room_id,
-                duration.as_millis() as u64 + utils::millis_since_unix_epoch(),
+                duration.as_millis().try_into().unwrap_or(u64::MAX)
+                    + utils::millis_since_unix_epoch(),
             )
             .await?;
     } else {
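
`Duration::as_millis` returns u128, so the old `as u64` could in principle truncate; the checked conversion saturates instead, which only matters for durations past roughly 584 million years. A std-only sketch:

    use std::time::Duration;

    // Saturating u128 -> u64 conversion, as in the typing-timeout hunk above.
    fn millis_saturating(d: Duration) -> u64 {
        d.as_millis().try_into().unwrap_or(u64::MAX)
    }

    fn main() {
        assert_eq!(millis_saturating(Duration::from_secs(30)), 30_000);
    }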
@@ -17,7 +17,7 @@ pub(crate) async fn search_users_route(
     body: Ruma<search_users::v3::Request>,
 ) -> Result<search_users::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
-    let limit = u64::from(body.limit) as usize;
+    let limit = body.limit.try_into().unwrap_or(usize::MAX);
 
     let mut users = services().users.iter().filter_map(|user_id| {
         // Filter out buggy users (they should not exist, but you never know...)
@@ -437,7 +437,9 @@ where
     };
 
     // With more than 1 buf, we gotta flatten into a Vec first.
-    let cap = first.remaining() + second.remaining() + body.size_hint().lower() as usize;
+    let cap = first.remaining()
+        + second.remaining()
+        + body.size_hint().lower().try_into().unwrap_or(usize::MAX);
     let mut vec = Vec::with_capacity(cap);
     vec.put(first);
     vec.put(second);
@@ -1100,7 +1100,7 @@ pub(crate) async fn get_missing_events_route(
     let mut events = Vec::new();
 
     let mut i = 0;
-    while i < queued_events.len() && events.len() < u64::from(body.limit) as usize {
+    while i < queued_events.len() && events.len() < body.limit.try_into().unwrap_or(usize::MAX) {
         if let Some(pdu) = services().rooms.timeline.get_pdu_json(&queued_events[i])? {
             let room_id_str = pdu
                 .get("room_id")
@@ -345,18 +345,43 @@ impl KeyValueDatabase {
                 .try_into()
                 .expect("pdu cache capacity fits into usize"),
             )),
+            #[allow(
+                clippy::as_conversions,
+                clippy::cast_sign_loss,
+                clippy::cast_possible_truncation
+            )]
             auth_chain_cache: Mutex::new(LruCache::new(
                 (100_000.0 * config.cache_capacity_modifier) as usize,
             )),
+            #[allow(
+                clippy::as_conversions,
+                clippy::cast_sign_loss,
+                clippy::cast_possible_truncation
+            )]
             shorteventid_cache: Mutex::new(LruCache::new(
                 (100_000.0 * config.cache_capacity_modifier) as usize,
             )),
+            #[allow(
+                clippy::as_conversions,
+                clippy::cast_sign_loss,
+                clippy::cast_possible_truncation
+            )]
             eventidshort_cache: Mutex::new(LruCache::new(
                 (100_000.0 * config.cache_capacity_modifier) as usize,
             )),
+            #[allow(
+                clippy::as_conversions,
+                clippy::cast_sign_loss,
+                clippy::cast_possible_truncation
+            )]
             shortstatekey_cache: Mutex::new(LruCache::new(
                 (100_000.0 * config.cache_capacity_modifier) as usize,
             )),
+            #[allow(
+                clippy::as_conversions,
+                clippy::cast_sign_loss,
+                clippy::cast_possible_truncation
+            )]
             statekeyshort_cache: Mutex::new(LruCache::new(
                 (100_000.0 * config.cache_capacity_modifier) as usize,
             )),

@@ -979,7 +1004,7 @@ impl KeyValueDatabase {
         use std::time::{Duration, Instant};
 
         let timer_interval =
-            Duration::from_secs(services().globals.config.cleanup_second_interval as u64);
+            Duration::from_secs(u64::from(services().globals.config.cleanup_second_interval));
 
         tokio::spawn(async move {
             let mut i = interval(timer_interval);
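
The float-derived cache capacities keep their `as` casts behind scoped `#[allow]` blocks rather than being rewritten: float-to-integer `as` casts have saturated (with NaN mapping to zero) since Rust 1.45, so the cast is well-defined, and the attribute limits the exemption to the one expression. A minimal sketch of the pattern:

    // Sketch of the scoped-allow pattern used for the cache sizes above.
    fn scaled_capacity(modifier: f64) -> usize {
        #[allow(
            clippy::as_conversions,
            clippy::cast_sign_loss,
            clippy::cast_possible_truncation
        )]
        let capacity = (100_000.0 * modifier) as usize;
        capacity
    }

    fn main() {
        assert_eq!(scaled_capacity(1.0), 100_000);
        assert_eq!(scaled_capacity(0.5), 50_000);
    }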
@@ -32,7 +32,7 @@ fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::O
     let mut db_opts = rocksdb::Options::default();
     db_opts.set_block_based_table_factory(&block_based_options);
     db_opts.create_if_missing(true);
-    db_opts.increase_parallelism(num_cpus::get() as i32);
+    db_opts.increase_parallelism(num_cpus::get().try_into().unwrap_or(i32::MAX));
     db_opts.set_max_open_files(max_open_files);
     db_opts.set_compression_type(rocksdb::DBCompressionType::Lz4);
     db_opts.set_bottommost_compression_type(rocksdb::DBCompressionType::Zstd);

@@ -58,6 +58,11 @@ fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::O
 
 impl KeyValueDatabaseEngine for Arc<Engine> {
     fn open(config: &Config) -> Result<Self> {
+        #[allow(
+            clippy::as_conversions,
+            clippy::cast_sign_loss,
+            clippy::cast_possible_truncation
+        )]
         let cache_capacity_bytes = (config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize;
         let rocksdb_cache = rocksdb::Cache::new_lru_cache(cache_capacity_bytes);

@@ -109,6 +114,7 @@ impl KeyValueDatabaseEngine for Arc<Engine> {
         Ok(())
     }
 
+    #[allow(clippy::as_conversions, clippy::cast_precision_loss)]
    fn memory_usage(&self) -> Result<String> {
         let stats =
             rocksdb::perf::get_memory_usage_stats(Some(&[&self.rocks]), Some(&[&self.cache]))?;
@@ -88,9 +88,14 @@ impl KeyValueDatabaseEngine for Arc<Engine> {
         // 1. convert MB to KiB
         // 2. divide by permanent connections + permanent iter connections + write connection
         // 3. round down to nearest integer
-        let cache_size_per_thread: u32 = ((config.db_cache_capacity_mb * 1024.0)
-            / ((num_cpus::get().max(1) * 2) + 1) as f64)
-            as u32;
+        #[allow(
+            clippy::as_conversions,
+            clippy::cast_possible_truncation,
+            clippy::cast_precision_loss,
+            clippy::cast_sign_loss
+        )]
+        let cache_size_per_thread = ((config.db_cache_capacity_mb * 1024.0)
+            / ((num_cpus::get() as f64 * 2.0) + 1.0)) as u32;
 
         let writer = Mutex::new(Engine::prepare_conn(&path, cache_size_per_thread)?);
@@ -84,9 +84,19 @@ impl Services {
             state: rooms::state::Service { db },
             state_accessor: rooms::state_accessor::Service {
                 db,
+                #[allow(
+                    clippy::as_conversions,
+                    clippy::cast_sign_loss,
+                    clippy::cast_possible_truncation
+                )]
                 server_visibility_cache: StdMutex::new(LruCache::new(
                     (100.0 * config.cache_capacity_modifier) as usize,
                 )),
+                #[allow(
+                    clippy::as_conversions,
+                    clippy::cast_sign_loss,
+                    clippy::cast_possible_truncation
+                )]
                 user_visibility_cache: StdMutex::new(LruCache::new(
                     (100.0 * config.cache_capacity_modifier) as usize,
                 )),

@@ -94,6 +104,11 @@ impl Services {
             state_cache: rooms::state_cache::Service { db },
             state_compressor: rooms::state_compressor::Service {
                 db,
+                #[allow(
+                    clippy::as_conversions,
+                    clippy::cast_sign_loss,
+                    clippy::cast_possible_truncation
+                )]
                 stateinfo_cache: StdMutex::new(LruCache::new(
                     (100.0 * config.cache_capacity_modifier) as usize,
                 )),
@@ -154,10 +154,8 @@ impl Service {
         } else {
             let (exact_width, exact_height) = {
                 // Copied from image::dynimage::resize_dimensions
-                let ratio = u64::from(original_width) * u64::from(height);
-                let nratio = u64::from(width) * u64::from(original_height);
-
-                let use_width = nratio <= ratio;
+                let use_width = (u64::from(width) * u64::from(original_height))
+                    <= (u64::from(original_width) * u64::from(height));
                 let intermediate = if use_width {
                     u64::from(original_height) * u64::from(width)
                         / u64::from(original_width)

@@ -167,21 +165,23 @@ impl Service {
                 };
                 if use_width {
                     if intermediate <= u64::from(::std::u32::MAX) {
-                        (width, intermediate as u32)
+                        (width, intermediate.try_into().unwrap_or(u32::MAX))
                     } else {
                         (
                             (u64::from(width) * u64::from(::std::u32::MAX) / intermediate)
-                                as u32,
+                                .try_into()
+                                .unwrap_or(u32::MAX),
                             ::std::u32::MAX,
                         )
                     }
                 } else if intermediate <= u64::from(::std::u32::MAX) {
-                    (intermediate as u32, height)
+                    (intermediate.try_into().unwrap_or(u32::MAX), height)
                 } else {
                     (
                         ::std::u32::MAX,
                         (u64::from(height) * u64::from(::std::u32::MAX) / intermediate)
-                            as u32,
+                            .try_into()
+                            .unwrap_or(u32::MAX),
                     )
                 }
             };
@@ -44,6 +44,9 @@ impl Service {
         let mut i = 0;
         for id in starting_events {
             let short = services().rooms.short.get_or_create_shorteventid(&id)?;
+            // I'm afraid to change this in case there is accidental reliance on
+            // the truncation
+            #[allow(clippy::as_conversions, clippy::cast_possible_truncation)]
             let bucket_id = (short % NUM_BUCKETS as u64) as usize;
             buckets[bucket_id].insert((short, id.clone()));
             i += 1;
@@ -107,7 +107,7 @@ impl Service {
             db,
             sender,
             receiver: Mutex::new(receiver),
-            maximum_requests: Arc::new(Semaphore::new(config.max_concurrent_requests as usize)),
+            maximum_requests: Arc::new(Semaphore::new(config.max_concurrent_requests.into())),
         })
     }
@@ -11,6 +11,8 @@ use std::{
     time::{SystemTime, UNIX_EPOCH},
 };
 
+// Hopefully we have a better chat protocol in 530 years
+#[allow(clippy::as_conversions, clippy::cast_possible_truncation)]
 pub(crate) fn millis_since_unix_epoch() -> u64 {
     SystemTime::now()
         .duration_since(UNIX_EPOCH)