enable as_conversions lint

There were some very, uh, creative (and inconsistent) ways to convert
between numeric types in here...
This commit is contained in:
Charles Hall 2024-05-12 15:32:23 -07:00
parent a78bf8f50b
commit 71c48f66c4
No known key found for this signature in database
GPG key ID: 7B8E0645816E07CF
21 changed files with 195 additions and 91 deletions

View file

@ -56,7 +56,11 @@ pub(crate) async fn get_latest_backup_info_route(
Ok(get_latest_backup_info::v3::Response {
algorithm,
count: (services().key_backups.count_keys(sender_user, &version)? as u32).into(),
count: services()
.key_backups
.count_keys(sender_user, &version)?
.try_into()
.expect("count should fit in UInt"),
etag: services().key_backups.get_etag(sender_user, &version)?,
version,
})
@ -79,10 +83,11 @@ pub(crate) async fn get_backup_info_route(
Ok(get_backup_info::v3::Response {
algorithm,
count: (services()
count: services()
.key_backups
.count_keys(sender_user, &body.version)? as u32)
.into(),
.count_keys(sender_user, &body.version)?
.try_into()
.expect("count should fit in UInt"),
etag: services()
.key_backups
.get_etag(sender_user, &body.version)?,
@ -144,10 +149,11 @@ pub(crate) async fn add_backup_keys_route(
}
Ok(add_backup_keys::v3::Response {
count: (services()
count: services()
.key_backups
.count_keys(sender_user, &body.version)? as u32)
.into(),
.count_keys(sender_user, &body.version)?
.try_into()
.expect("count should fit in UInt"),
etag: services()
.key_backups
.get_etag(sender_user, &body.version)?,
@ -189,10 +195,11 @@ pub(crate) async fn add_backup_keys_for_room_route(
}
Ok(add_backup_keys_for_room::v3::Response {
count: (services()
count: services()
.key_backups
.count_keys(sender_user, &body.version)? as u32)
.into(),
.count_keys(sender_user, &body.version)?
.try_into()
.expect("count should fit in UInt"),
etag: services()
.key_backups
.get_etag(sender_user, &body.version)?,
@ -232,10 +239,11 @@ pub(crate) async fn add_backup_keys_for_session_route(
)?;
Ok(add_backup_keys_for_session::v3::Response {
count: (services()
count: services()
.key_backups
.count_keys(sender_user, &body.version)? as u32)
.into(),
.count_keys(sender_user, &body.version)?
.try_into()
.expect("count should fit in UInt"),
etag: services()
.key_backups
.get_etag(sender_user, &body.version)?,
@ -302,10 +310,11 @@ pub(crate) async fn delete_backup_keys_route(
.delete_all_keys(sender_user, &body.version)?;
Ok(delete_backup_keys::v3::Response {
count: (services()
count: services()
.key_backups
.count_keys(sender_user, &body.version)? as u32)
.into(),
.count_keys(sender_user, &body.version)?
.try_into()
.expect("count should fit in UInt"),
etag: services()
.key_backups
.get_etag(sender_user, &body.version)?,
@ -325,10 +334,11 @@ pub(crate) async fn delete_backup_keys_for_room_route(
.delete_room_keys(sender_user, &body.version, &body.room_id)?;
Ok(delete_backup_keys_for_room::v3::Response {
count: (services()
count: services()
.key_backups
.count_keys(sender_user, &body.version)? as u32)
.into(),
.count_keys(sender_user, &body.version)?
.try_into()
.expect("count should fit in UInt"),
etag: services()
.key_backups
.get_etag(sender_user, &body.version)?,
@ -351,10 +361,11 @@ pub(crate) async fn delete_backup_keys_for_session_route(
)?;
Ok(delete_backup_keys_for_session::v3::Response {
count: (services()
count: services()
.key_backups
.count_keys(sender_user, &body.version)? as u32)
.into(),
.count_keys(sender_user, &body.version)?
.try_into()
.expect("count should fit in UInt"),
etag: services()
.key_backups
.get_etag(sender_user, &body.version)?,

View file

@ -2,6 +2,7 @@ use crate::{services, Error, Result, Ruma};
use ruma::{
api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions},
events::StateEventType,
uint,
};
use std::collections::HashSet;
use tracing::error;
@ -70,7 +71,8 @@ pub(crate) async fn get_context_route(
}
// Use limit with maximum 100
let limit = u64::from(body.limit).min(100) as usize;
let half_limit =
usize::try_from(body.limit.min(uint!(100)) / uint!(2)).expect("0-50 should fit in usize");
let base_event = base_event.to_room_event();
@ -78,7 +80,7 @@ pub(crate) async fn get_context_route(
.rooms
.timeline
.pdus_until(sender_user, &room_id, base_token)?
.take(limit / 2)
.take(half_limit)
.filter_map(|r| r.ok()) // Remove buggy events
.filter(|(_, pdu)| {
services()
@ -115,7 +117,7 @@ pub(crate) async fn get_context_route(
.rooms
.timeline
.pdus_after(sender_user, &room_id, base_token)?
.take(limit / 2)
.take(half_limit)
.filter_map(|r| r.ok()) // Remove buggy events
.filter(|(_, pdu)| {
services()

View file

@ -24,7 +24,7 @@ use ruma::{
},
StateEventType,
},
ServerName, UInt,
uint, ServerName, UInt,
};
use tracing::{error, info, warn};
@ -157,8 +157,8 @@ pub(crate) async fn get_public_rooms_filtered_helper(
});
}
let limit = limit.map_or(10, u64::from);
let mut num_since = 0_u64;
let limit = limit.unwrap_or(uint!(10));
let mut num_since = UInt::MIN;
if let Some(s) = &since {
let mut characters = s.chars();
@ -340,21 +340,21 @@ pub(crate) async fn get_public_rooms_filtered_helper(
all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members));
let total_room_count_estimate = (all_rooms.len() as u32).into();
let total_room_count_estimate = all_rooms.len().try_into().unwrap_or(UInt::MAX);
let chunk: Vec<_> = all_rooms
.into_iter()
.skip(num_since as usize)
.take(limit as usize)
.skip(num_since.try_into().expect("UInt should fit in usize"))
.take(limit.try_into().expect("UInt should fit in usize"))
.collect();
let prev_batch = if num_since == 0 {
let prev_batch = if num_since == uint!(0) {
None
} else {
Some(format!("p{num_since}"))
};
let next_batch = if chunk.len() < limit as usize {
let next_batch = if chunk.len() < limit.try_into().expect("UInt should fit in usize") {
None
} else {
Some(format!("n{}", num_since + limit))

View file

@ -8,6 +8,7 @@ use ruma::{
message::{get_message_events, send_message_event},
},
events::{StateEventType, TimelineEventType},
uint,
};
use std::{
collections::{BTreeMap, HashSet},
@ -136,7 +137,11 @@ pub(crate) async fn get_message_events_route(
.lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from)
.await?;
let limit = u64::from(body.limit).min(100) as usize;
let limit = body
.limit
.min(uint!(100))
.try_into()
.expect("0-100 should fit in usize");
let next_token;

View file

@ -1,6 +1,9 @@
use ruma::api::client::relations::{
get_relating_events, get_relating_events_with_rel_type,
get_relating_events_with_rel_type_and_event_type,
use ruma::{
api::client::relations::{
get_relating_events, get_relating_events_with_rel_type,
get_relating_events_with_rel_type_and_event_type,
},
uint,
};
use crate::{service::rooms::timeline::PduCount, services, Result, Ruma};
@ -28,9 +31,10 @@ pub(crate) async fn get_relating_events_with_rel_type_and_event_type_route(
// Use limit or else 10, with maximum 100
let limit = body
.limit
.and_then(|u| u32::try_from(u).ok())
.map_or(10_usize, |u| u as usize)
.min(100);
.map(|x| x.min(uint!(100)))
.unwrap_or(uint!(10))
.try_into()
.expect("0-100 should fit in usize");
let res = services()
.rooms
@ -78,9 +82,10 @@ pub(crate) async fn get_relating_events_with_rel_type_route(
// Use limit or else 10, with maximum 100
let limit = body
.limit
.and_then(|u| u32::try_from(u).ok())
.map_or(10_usize, |u| u as usize)
.min(100);
.map(|x| x.min(uint!(100)))
.unwrap_or(uint!(10))
.try_into()
.expect("0-100 should fit in usize");
let res = services()
.rooms
@ -126,9 +131,10 @@ pub(crate) async fn get_relating_events_route(
// Use limit or else 10, with maximum 100
let limit = body
.limit
.and_then(|u| u32::try_from(u).ok())
.map_or(10_usize, |u| u as usize)
.min(100);
.map(|x| x.min(uint!(100)))
.unwrap_or(uint!(10))
.try_into()
.expect("0-100 should fit in usize");
services()
.rooms

View file

@ -1,10 +1,13 @@
use crate::{services, Error, Result, Ruma};
use ruma::api::client::{
error::ErrorKind,
search::search_events::{
self,
v3::{EventContextResult, ResultCategories, ResultRoomEvents, SearchResult},
use ruma::{
api::client::{
error::ErrorKind,
search::search_events::{
self,
v3::{EventContextResult, ResultCategories, ResultRoomEvents, SearchResult},
},
},
uint,
};
use std::collections::BTreeMap;
@ -32,7 +35,12 @@ pub(crate) async fn search_events_route(
});
// Use limit or else 10, with maximum 100
let limit = filter.limit.map_or(10, u64::from).min(100) as usize;
let limit = filter
.limit
.map(|x| x.min(uint!(100)))
.unwrap_or(uint!(10))
.try_into()
.expect("0-100 should fit in usize");
let mut searches = Vec::new();
@ -123,8 +131,8 @@ pub(crate) async fn search_events_route(
Ok(search_events::v3::Response::new(ResultCategories {
room_events: ResultRoomEvents {
count: Some((results.len() as u32).into()), // TODO: set this to none. Element shouldn't depend on it
groups: BTreeMap::new(), // TODO
count: None,
groups: BTreeMap::new(), // TODO
next_batch,
results,
state: BTreeMap::new(), // TODO

View file

@ -1,5 +1,5 @@
use crate::{services, Result, Ruma};
use ruma::api::client::space::get_hierarchy;
use ruma::{api::client::space::get_hierarchy, uint};
/// # `GET /_matrix/client/v1/rooms/{room_id}/hierarchy`
///
@ -15,9 +15,17 @@ pub(crate) async fn get_hierarchy_route(
.and_then(|s| s.parse::<usize>().ok())
.unwrap_or(0);
let limit = body.limit.map_or(10, u64::from).min(100) as usize;
let limit = body
.limit
.map(|x| x.min(uint!(100)))
.unwrap_or(uint!(10))
.try_into()
.expect("0-100 should fit in usize");
let max_depth = body.max_depth.map_or(3, u64::from).min(10) as usize + 1; // +1 to skip the space room itself
let max_depth = usize::try_from(body.max_depth.map(|x| x.min(uint!(10))).unwrap_or(uint!(3)))
.expect("0-10 should fit in usize")
// Skip the space room itself
+ 1;
services()
.rooms

View file

@ -1087,8 +1087,8 @@ async fn load_joined_room(
},
summary: RoomSummary {
heroes,
joined_member_count: joined_member_count.map(|n| (n as u32).into()),
invited_member_count: invited_member_count.map(|n| (n as u32).into()),
joined_member_count: joined_member_count.map(UInt::new_saturating),
invited_member_count: invited_member_count.map(UInt::new_saturating),
},
unread_notifications: UnreadNotificationsCount {
highlight_count,
@ -1140,7 +1140,7 @@ fn load_timeline(
// Take the last events for the timeline
timeline_pdus = non_timeline_pdus
.by_ref()
.take(limit as usize)
.take(limit.try_into().expect("limit should fit in usize"))
.collect::<Vec<_>>()
.into_iter()
.rev()
@ -1427,12 +1427,16 @@ pub(crate) async fn sync_events_v4_route(
.ranges
.into_iter()
.map(|mut r| {
r.0 =
r.0.clamp(uint!(0), UInt::from(all_joined_rooms.len() as u32 - 1));
r.1 =
r.1.clamp(r.0, UInt::from(all_joined_rooms.len() as u32 - 1));
let room_ids = all_joined_rooms
[(u64::from(r.0) as usize)..=(u64::from(r.1) as usize)]
r.0 = r.0.clamp(
uint!(0),
UInt::try_from(all_joined_rooms.len() - 1).unwrap_or(UInt::MAX),
);
r.1 = r.1.clamp(
r.0,
UInt::try_from(all_joined_rooms.len() - 1).unwrap_or(UInt::MAX),
);
let room_ids = all_joined_rooms[r.0.try_into().unwrap_or(usize::MAX)
..=r.1.try_into().unwrap_or(usize::MAX)]
.to_vec();
new_known_rooms.extend(room_ids.iter().cloned());
for room_id in &room_ids {
@ -1468,7 +1472,7 @@ pub(crate) async fn sync_events_v4_route(
}
})
.collect(),
count: UInt::from(all_joined_rooms.len() as u32),
count: UInt::try_from(all_joined_rooms.len()).unwrap_or(UInt::MAX),
},
);
@ -1663,20 +1667,20 @@ pub(crate) async fn sync_events_v4_route(
prev_batch,
limited,
joined_count: Some(
(services()
services()
.rooms
.state_cache
.room_joined_count(room_id)?
.unwrap_or(0) as u32)
.into(),
.map(UInt::new_saturating)
.unwrap_or(uint!(0)),
),
invited_count: Some(
(services()
services()
.rooms
.state_cache
.room_invited_count(room_id)?
.unwrap_or(0) as u32)
.into(),
.map(UInt::new_saturating)
.unwrap_or(uint!(0)),
),
num_live: None, // Count events in timeline greater than global sync counter
timestamp: None,

View file

@ -30,7 +30,8 @@ pub(crate) async fn create_typing_event_route(
.typing_add(
sender_user,
&body.room_id,
duration.as_millis() as u64 + utils::millis_since_unix_epoch(),
duration.as_millis().try_into().unwrap_or(u64::MAX)
+ utils::millis_since_unix_epoch(),
)
.await?;
} else {

View file

@ -17,7 +17,7 @@ pub(crate) async fn search_users_route(
body: Ruma<search_users::v3::Request>,
) -> Result<search_users::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let limit = u64::from(body.limit) as usize;
let limit = body.limit.try_into().unwrap_or(usize::MAX);
let mut users = services().users.iter().filter_map(|user_id| {
// Filter out buggy users (they should not exist, but you never know...)