enable redundant_closure_for_method_calls lint

Charles Hall 2024-05-14 18:59:24 -07:00
parent a636405bed
commit 96e1877639
GPG key ID: 7B8E0645816E07CF
37 changed files with 85 additions and 88 deletions

View file

@@ -60,6 +60,7 @@ negative_feature_names = "warn"
pub_without_shorthand = "warn"
rc_buffer = "warn"
rc_mutex = "warn"
+redundant_closure_for_method_calls = "warn"
redundant_feature_names = "warn"
redundant_type_annotations = "warn"
ref_patterns = "warn"

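Every hunk below applies the mechanical rewrite this lint asks for: a closure that only forwards its argument to a single method becomes a path to that method. A minimal sketch of the pattern (illustrative names, not taken from the diff):

fn main() {
    let names = vec!["alice".to_owned(), "bob".to_owned()];

    // Flagged by redundant_closure_for_method_calls:
    let _lens: Vec<usize> = names.iter().map(|s| s.len()).collect();

    // The suggested replacement, applied throughout this commit:
    let lens: Vec<usize> = names.iter().map(String::len).collect();
    assert_eq!(lens, [5, 3]);
}
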
View file

@@ -30,7 +30,7 @@ where
&[MatrixVersion::V1_0],
)
.unwrap()
-.map(|body| body.freeze());
+.map(BytesMut::freeze);
let mut parts = http_request.uri().clone().into_parts();
let old_path_and_query = parts.path_and_query.unwrap().as_str().to_owned();

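Here the closure passed a BytesMut by value to a single method, so the path form is a drop-in replacement. A self-contained sketch, assuming the bytes crate this code already depends on:

use bytes::{Bytes, BytesMut};

fn main() {
    let mut buf = BytesMut::new();
    buf.extend_from_slice(b"hello");

    // `|body| body.freeze()` and `BytesMut::freeze` behave identically here,
    // because the Option holds its receiver by value.
    let frozen: Option<Bytes> = Some(buf).map(BytesMut::freeze);
    assert_eq!(frozen.unwrap(), Bytes::from_static(b"hello"));
}
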
View file

@@ -353,7 +353,7 @@ pub(crate) async fn change_password_route(
for id in services()
.users
.all_device_ids(sender_user)
-.filter_map(|id| id.ok())
+.filter_map(Result::ok)
.filter(|id| id != sender_device)
{
services().users.remove_device(sender_user, &id)?;

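`.filter_map(|r| r.ok())` to `.filter_map(Result::ok)` is by far the most common substitution in this commit. Both forms silently drop the Err values; in miniature:

fn main() {
    let results: Vec<Result<i32, &str>> = vec![Ok(1), Err("broken"), Ok(3)];

    // Err items are discarded; Ok items pass through unwrapped.
    let good: Vec<i32> = results.into_iter().filter_map(Result::ok).collect();
    assert_eq!(good, [1, 3]);
}
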
View file

@@ -81,7 +81,7 @@ pub(crate) async fn get_context_route(
.timeline
.pdus_until(sender_user, &room_id, base_token)?
.take(half_limit)
-.filter_map(|r| r.ok()) // Remove buggy events
+.filter_map(Result::ok) // Remove buggy events
.filter(|(_, pdu)| {
services()
.rooms
@@ -117,7 +117,7 @@ pub(crate) async fn get_context_route(
.timeline
.pdus_after(sender_user, &room_id, base_token)?
.take(half_limit)
-.filter_map(|r| r.ok()) // Remove buggy events
+.filter_map(Result::ok) // Remove buggy events
.filter(|(_, pdu)| {
services()
.rooms

View file

@@ -18,7 +18,7 @@ pub(crate) async fn get_devices_route(
let devices: Vec<device::Device> = services()
.users
.all_devices_metadata(sender_user)
-.filter_map(|r| r.ok()) // Filter out buggy devices
+.filter_map(Result::ok) // Filter out buggy devices
.collect();
Ok(get_devices::v3::Response { devices })

View file

@@ -226,14 +226,14 @@ pub(crate) async fn get_key_changes_route(
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`."))?,
),
)
-.filter_map(|r| r.ok()),
+.filter_map(Result::ok),
);
for room_id in services()
.rooms
.state_cache
.rooms_joined(sender_user)
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
{
device_list_updates.extend(
services()
@@ -247,7 +247,7 @@ pub(crate) async fn get_key_changes_route(
Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`.")
})?),
)
-.filter_map(|r| r.ok()),
+.filter_map(Result::ok),
);
}
Ok(get_key_changes::v3::Response {

View file

@@ -59,7 +59,7 @@ pub(crate) async fn join_room_by_id_route(
.iter()
.filter_map(|event| serde_json::from_str(event.json().get()).ok())
.filter_map(|event: serde_json::Value| event.get("sender").cloned())
-.filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
+.filter_map(|sender| sender.as_str().map(ToOwned::to_owned))
.filter_map(|sender| UserId::parse(sender).ok())
.map(|user| user.server_name().to_owned()),
);
@@ -105,7 +105,7 @@ pub(crate) async fn join_room_by_id_or_alias_route(
.iter()
.filter_map(|event| serde_json::from_str(event.json().get()).ok())
.filter_map(|event: serde_json::Value| event.get("sender").cloned())
-.filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
+.filter_map(|sender| sender.as_str().map(ToOwned::to_owned))
.filter_map(|sender| UserId::parse(sender).ok())
.map(|user| user.server_name().to_owned()),
);
@@ -435,7 +435,7 @@ pub(crate) async fn joined_rooms_route(
.rooms
.state_cache
.rooms_joined(sender_user)
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
.collect(),
})
}
@@ -501,7 +501,7 @@ pub(crate) async fn joined_members_route(
.rooms
.state_cache
.room_members(&body.room_id)
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
{
let display_name = services().users.displayname(&user_id)?;
let avatar_url = services().users.avatar_url(&user_id)?;
@@ -1308,7 +1308,7 @@ pub(crate) async fn invite_helper(
.rooms
.state_cache
.room_servers(room_id)
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
.filter(|server| &**server != services().globals.server_name());
services().sending.send_pdu(servers, &pdu_id)?;
@@ -1512,7 +1512,7 @@ async fn remote_leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> {
.iter()
.filter_map(|event| serde_json::from_str(event.json().get()).ok())
.filter_map(|event: serde_json::Value| event.get("sender").cloned())
-.filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
+.filter_map(|sender| sender.as_str().map(ToOwned::to_owned))
.filter_map(|sender| UserId::parse(sender).ok())
.map(|user| user.server_name().to_owned())
.collect();

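The join/leave helpers get the same treatment for ToOwned: `|s| s.to_owned()` over an Option<&str> becomes `map(ToOwned::to_owned)`. A sketch:

fn main() {
    let sender: Option<&str> = Some("@alice:example.org");

    // ToOwned::to_owned resolves to fn(&str) -> String here.
    let owned: Option<String> = sender.map(ToOwned::to_owned);
    assert_eq!(owned.as_deref(), Some("@alice:example.org"));
}
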
View file

@@ -156,7 +156,7 @@ pub(crate) async fn get_message_events_route(
.timeline
.pdus_after(sender_user, &body.room_id, from)?
.take(limit)
-.filter_map(|r| r.ok()) // Filter out buggy events
+.filter_map(Result::ok) // Filter out buggy events
.filter(|(_, pdu)| {
services()
.rooms
@@ -205,7 +205,7 @@ pub(crate) async fn get_message_events_route(
.timeline
.pdus_until(sender_user, &body.room_id, from)?
.take(limit)
-.filter_map(|r| r.ok()) // Filter out buggy events
+.filter_map(Result::ok) // Filter out buggy events
.filter(|(_, pdu)| {
services()
.rooms

View file

@@ -34,7 +34,7 @@ pub(crate) async fn set_displayname_route(
.rooms
.state_cache
.rooms_joined(sender_user)
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
.map(|room_id| {
Ok::<_, Error>((
PduBuilder {
@@ -70,7 +70,7 @@ pub(crate) async fn set_displayname_route(
room_id,
))
})
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
.collect();
for (pdu_builder, room_id) in all_rooms_joined {
@@ -151,7 +151,7 @@ pub(crate) async fn set_avatar_url_route(
.rooms
.state_cache
.rooms_joined(sender_user)
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
.map(|room_id| {
Ok::<_, Error>((
PduBuilder {
@@ -187,7 +187,7 @@ pub(crate) async fn set_avatar_url_route(
room_id,
))
})
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
.collect();
for (pdu_builder, room_id) in all_joined_rooms {

View file

@@ -9,7 +9,7 @@ use ruma::{
},
},
events::{push_rules::PushRulesEvent, GlobalAccountDataEventType},
-push::{InsertPushRuleError, RemovePushRuleError},
+push::{AnyPushRuleRef, InsertPushRuleError, RemovePushRuleError},
};
/// # `GET /_matrix/client/r0/pushrules`
@@ -281,7 +281,7 @@ pub(crate) async fn get_pushrule_enabled_route(
let global = account_data.content.global;
let enabled = global
.get(body.kind.clone(), &body.rule_id)
-.map(|r| r.enabled())
+.map(AnyPushRuleRef::enabled)
.ok_or(Error::BadRequest(
ErrorKind::NotFound,
"Push rule not found.",

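This hunk shows a practical consequence of the path form: the type that owns the method has to be nameable, hence the new AnyPushRuleRef import. The closure form never needed the type in scope. The same effect with std types only:

use std::net::Ipv4Addr;

fn main() {
    let addr = Some(&Ipv4Addr::LOCALHOST);

    // With `|a| a.is_loopback()` the type name never appears in the source;
    // the path form `Ipv4Addr::is_loopback` is why this commit has to add
    // imports such as AnyPushRuleRef above.
    let loopback = addr.map(Ipv4Addr::is_loopback);
    assert_eq!(loopback, Some(true));
}
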
View file

@@ -565,7 +565,7 @@ pub(crate) async fn get_room_aliases_route(
.rooms
.alias
.local_aliases_for_room(&body.room_id)
-.filter_map(|a| a.ok())
+.filter_map(Result::ok)
.collect(),
})
}
@@ -814,7 +814,7 @@ pub(crate) async fn upgrade_room_route(
.rooms
.alias
.local_aliases_for_room(&body.room_id)
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
{
services()
.rooms

View file

@@ -30,7 +30,7 @@ pub(crate) async fn search_events_route(
.rooms
.state_cache
.rooms_joined(sender_user)
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
.collect()
});
@@ -118,7 +118,7 @@ pub(crate) async fn search_events_route(
result: Some(result),
})
})
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
.skip(skip)
.take(limit)
.collect();

View file

@@ -217,7 +217,7 @@ async fn sync_helper(
services()
.users
.keys_changed(sender_user.as_ref(), since, None)
-.filter_map(|r| r.ok()),
+.filter_map(Result::ok),
);
let all_joined_rooms = services()
@@ -461,7 +461,7 @@ async fn sync_helper(
.rooms
.user
.get_shared_rooms(vec![sender_user.clone(), user_id.clone()])?
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
.filter_map(|other_room_id| {
Some(
services()
@@ -639,7 +639,7 @@ async fn load_joined_room(
.rooms
.timeline
.all_pdus(sender_user, room_id)?
-.filter_map(|pdu| pdu.ok()) // Ignore all broken pdus
+.filter_map(Result::ok) // Ignore all broken pdus
.filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember)
.map(|(_, pdu)| {
let content: RoomMemberEventContent =
@@ -674,7 +674,7 @@ async fn load_joined_room(
}
})
// Filter out buggy users
-.filter_map(|u| u.ok())
+.filter_map(Result::ok)
// Filter for possible heroes
.flatten()
{
@@ -978,7 +978,7 @@ async fn load_joined_room(
services()
.users
.keys_changed(room_id.as_ref(), since, None)
-.filter_map(|r| r.ok()),
+.filter_map(Result::ok),
);
let notification_count = send_notification_counts
@@ -1018,7 +1018,7 @@ async fn load_joined_room(
.edus
.read_receipt
.readreceipts_since(room_id, since)
-.filter_map(|r| r.ok()) // Filter out buggy events
+.filter_map(Result::ok) // Filter out buggy events
.map(|(_, _, v)| v)
.collect();
@@ -1139,7 +1139,7 @@ fn share_encrypted_room(
.rooms
.user
.get_shared_rooms(vec![sender_user.to_owned(), user_id.to_owned()])?
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
.filter(|room_id| room_id != ignore_room)
.filter_map(|other_room_id| {
Some(
@@ -1192,7 +1192,7 @@ pub(crate) async fn sync_events_v4_route(
.rooms
.state_cache
.rooms_joined(&sender_user)
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
.collect::<Vec<_>>();
if body.extensions.to_device.enabled.unwrap_or(false) {
@@ -1211,7 +1211,7 @@ pub(crate) async fn sync_events_v4_route(
services()
.users
.keys_changed(sender_user.as_ref(), globalsince, None)
-.filter_map(|r| r.ok()),
+.filter_map(Result::ok),
);
for room_id in &all_joined_rooms {
@@ -1352,7 +1352,7 @@ pub(crate) async fn sync_events_v4_route(
services()
.users
.keys_changed(room_id.as_ref(), globalsince, None)
-.filter_map(|r| r.ok()),
+.filter_map(Result::ok),
);
}
for user_id in left_encrypted_users {
@@ -1360,7 +1360,7 @@ pub(crate) async fn sync_events_v4_route(
.rooms
.user
.get_shared_rooms(vec![sender_user.clone(), user_id.clone()])?
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
.filter_map(|other_room_id| {
Some(
services()
@@ -1552,7 +1552,7 @@ pub(crate) async fn sync_events_v4_route(
.rooms
.state_cache
.room_members(room_id)
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
.filter(|member| member != &sender_user)
.filter_map(|member| {
services()

View file

@@ -27,7 +27,7 @@ pub(crate) async fn get_threads_route(
.threads
.threads_until(sender_user, &body.room_id, from, &body.include)?
.take(limit)
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
.filter(|(_, pdu)| {
services()
.rooms

View file

@@ -55,7 +55,7 @@ pub(crate) async fn search_users_route(
.rooms
.state_cache
.rooms_joined(&user_id)
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
.any(|room| {
services()
.rooms

View file

@@ -1042,7 +1042,7 @@ pub(crate) async fn get_backfill_route(
.take(limit.try_into().unwrap());
let events = all_events
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
.filter(|(_, e)| {
matches!(
services().rooms.state_accessor.server_can_see_event(
@@ -1563,7 +1563,7 @@ async fn create_join_event(
.rooms
.state_cache
.room_servers(room_id)
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
.filter(|server| &**server != services().globals.server_name());
services().sending.send_pdu(servers, &pdu_id)?;
@@ -1767,7 +1767,7 @@ pub(crate) async fn get_devices_route(
devices: services()
.users
.all_devices_metadata(&body.user_id)
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
.filter_map(|metadata| {
Some(UserDevice {
keys: services()

View file

@@ -780,7 +780,7 @@ impl KeyValueDatabase {
}
// Force E2EE device list updates so we can send them over federation
-for user_id in services().users.iter().filter_map(|r| r.ok()) {
+for user_id in services().users.iter().filter_map(Result::ok) {
services().users.mark_device_key_update(&user_id)?;
}

View file

@@ -188,7 +188,7 @@ impl KvTree for RocksDbEngineTree<'_> {
self.db
.rocks
.iterator_cf_opt(&self.cf(), readoptions, rocksdb::IteratorMode::Start)
-.map(|r| r.unwrap())
+.map(Result::unwrap)
.map(|(k, v)| (Vec::from(k), Vec::from(v))),
)
}
@@ -215,7 +215,7 @@ impl KvTree for RocksDbEngineTree<'_> {
},
),
)
-.map(|r| r.unwrap())
+.map(Result::unwrap)
.map(|(k, v)| (Vec::from(k), Vec::from(v))),
)
}
@@ -269,7 +269,7 @@ impl KvTree for RocksDbEngineTree<'_> {
readoptions,
rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward),
)
-.map(|r| r.unwrap())
+.map(Result::unwrap)
.map(|(k, v)| (Vec::from(k), Vec::from(v)))
.take_while(move |(k, _)| k.starts_with(&prefix)),
)

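In the storage backends the closure wraps unwrap rather than ok, and `map(Result::unwrap)` keeps the panic-on-error behavior exactly; only the closure goes away (along with the now-unneeded `move` on the SQLite closures below). In miniature:

fn main() {
    let rows: Vec<Result<(u32, u32), ()>> = vec![Ok((1, 2)), Ok((3, 4))];

    // Same semantics as `.map(|r| r.unwrap())`: panics if any item is Err.
    let pairs: Vec<(u32, u32)> = rows.into_iter().map(Result::unwrap).collect();
    assert_eq!(pairs, [(1, 2), (3, 4)]);
}
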
View file

@@ -194,7 +194,7 @@ impl SqliteTable {
statement
.query_map([], |row| Ok((row.get_unwrap(0), row.get_unwrap(1))))
.unwrap()
-.map(move |r| r.unwrap()),
+.map(Result::unwrap),
);
Box::new(PreparedStatementIterator {
@@ -291,7 +291,7 @@ impl KvTree for SqliteTable {
statement
.query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1))))
.unwrap()
-.map(move |r| r.unwrap()),
+.map(Result::unwrap),
);
Box::new(PreparedStatementIterator {
iterator,
@@ -313,7 +313,7 @@ impl KvTree for SqliteTable {
statement
.query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1))))
.unwrap()
-.map(move |r| r.unwrap()),
+.map(Result::unwrap),
);
Box::new(PreparedStatementIterator {

View file

@@ -20,7 +20,7 @@ impl service::account_data::Data for KeyValueDatabase {
data: &serde_json::Value,
) -> Result<()> {
let mut prefix = room_id
-.map(|r| r.to_string())
+.map(ToString::to_string)
.unwrap_or_default()
.as_bytes()
.to_vec();
@@ -70,7 +70,7 @@ impl service::account_data::Data for KeyValueDatabase {
kind: RoomAccountDataEventType,
) -> Result<Option<Box<serde_json::value::RawValue>>> {
let mut key = room_id
-.map(|r| r.to_string())
+.map(ToString::to_string)
.unwrap_or_default()
.as_bytes()
.to_vec();
@@ -105,7 +105,7 @@ impl service::account_data::Data for KeyValueDatabase {
let mut userdata = HashMap::new();
let mut prefix = room_id
-.map(|r| r.to_string())
+.map(ToString::to_string)
.unwrap_or_default()
.as_bytes()
.to_vec();

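The account-data keys start with an optional room prefix, and `ToString::to_string` substitutes for `|r| r.to_string()` on the borrowed room id. Sketched with a plain &str in place of the RoomId:

fn main() {
    let room_id: Option<&str> = None;

    // Empty prefix for global account data, the room id's string form otherwise.
    let prefix: String = room_id.map(ToString::to_string).unwrap_or_default();
    assert_eq!(prefix, "");
}
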
View file

@@ -48,7 +48,7 @@ impl service::appservice::Data for KeyValueDatabase {
fn all(&self) -> Result<Vec<(String, Registration)>> {
self.iter_ids()?
-.filter_map(|id| id.ok())
+.filter_map(Result::ok)
.map(move |id| {
Ok((
id.clone(),

View file

@@ -56,7 +56,7 @@ impl service::globals::Data for KeyValueDatabase {
.rooms
.state_cache
.rooms_joined(user_id)
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
{
let short_roomid = services()
.rooms

View file

@@ -283,7 +283,7 @@ impl service::key_backups::Data for KeyValueDatabase {
Ok::<_, Error>((session_id, key_data))
})
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
.collect())
}

View file

@@ -101,7 +101,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
let mut joined_servers = HashSet::new();
let mut real_users = HashSet::new();
-for joined in self.room_members(room_id).filter_map(|r| r.ok()) {
+for joined in self.room_members(room_id).filter_map(Result::ok) {
joined_servers.insert(joined.server_name().to_owned());
if joined.server_name() == services().globals.server_name()
&& !services().users.is_deactivated(&joined).unwrap_or(true)
@@ -111,7 +111,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
joinedcount += 1;
}
-for _invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) {
+for _invited in self.room_members_invited(room_id).filter_map(Result::ok) {
invitedcount += 1;
}
@@ -126,7 +126,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
.unwrap()
.insert(room_id.to_owned(), Arc::new(real_users));
-for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) {
+for old_joined_server in self.room_servers(room_id).filter_map(Result::ok) {
if !joined_servers.remove(&old_joined_server) {
// Server not in room anymore
let mut roomserver_id = room_id.as_bytes().to_vec();

View file

@@ -68,7 +68,7 @@ impl service::rooms::threads::Data for KeyValueDatabase {
})?)
.map_err(|_| Error::bad_database("Invalid UserId in threadid_userids."))
})
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
.collect(),
))
} else {

View file

@@ -129,7 +129,7 @@ impl service::rooms::user::Data for KeyValueDatabase {
Ok::<_, Error>(room_id)
})
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
});
// We use the default compare function because keys are sorted correctly (not reversed)

View file

@@ -12,7 +12,7 @@ impl service::transaction_ids::Data for KeyValueDatabase {
) -> Result<()> {
let mut key = user_id.as_bytes().to_vec();
key.push(0xff);
-key.extend_from_slice(device_id.map(|d| d.as_bytes()).unwrap_or_default());
+key.extend_from_slice(device_id.map(DeviceId::as_bytes).unwrap_or_default());
key.push(0xff);
key.extend_from_slice(txn_id.as_bytes());
@@ -29,7 +29,7 @@ impl service::transaction_ids::Data for KeyValueDatabase {
) -> Result<Option<Vec<u8>>> {
let mut key = user_id.as_bytes().to_vec();
key.push(0xff);
-key.extend_from_slice(device_id.map(|d| d.as_bytes()).unwrap_or_default());
+key.extend_from_slice(device_id.map(DeviceId::as_bytes).unwrap_or_default());
key.push(0xff);
key.extend_from_slice(txn_id.as_bytes());

View file

@@ -34,7 +34,7 @@ impl service::uiaa::Data for KeyValueDatabase {
.read()
.unwrap()
.get(&(user_id.to_owned(), device_id.to_owned(), session.to_owned()))
-.map(|j| j.to_owned())
+.map(ToOwned::to_owned)
}
fn update_uiaa_session(

View file

@@ -632,7 +632,7 @@ impl service::users::Data for KeyValueDatabase {
.rooms
.state_cache
.rooms_joined(user_id)
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
{
// Don't send key updates to unencrypted rooms
if services()
@@ -837,7 +837,7 @@ impl service::users::Data for KeyValueDatabase {
.map_err(|_| Error::bad_database("ToDeviceId has invalid count bytes."))?,
))
})
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
.take_while(|&(_, count)| count <= until)
{
self.todeviceid_events.remove(&key)?;

View file

@@ -68,7 +68,7 @@ impl Service {
warn!("Failed to find destination {}: {}", destination, e);
Error::BadServerResponse("Invalid destination")
})?
-.map(|body| body.freeze());
+.map(BytesMut::freeze);
let reqwest_request = reqwest::Request::try_from(http_request)?;

View file

@@ -434,9 +434,7 @@ impl Service {
// The original create event must be in the auth events
if !matches!(
-auth_events
-.get(&(StateEventType::RoomCreate, String::new()))
-.map(|a| a.as_ref()),
+auth_events.get(&(StateEventType::RoomCreate, String::new())),
Some(_) | None
) {
return Err(Error::BadRequest(
@@ -727,9 +725,7 @@ impl Service {
.get_shortstatekey(&StateEventType::RoomCreate, "")?
.expect("Room exists");
-if state.get(&create_shortstatekey).map(|id| id.as_ref())
-!= Some(&create_event.event_id)
-{
+if state.get(&create_shortstatekey) != Some(&create_event.event_id) {
return Err(Error::bad_database(
"Incoming event refers to wrong create event.",
));

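These two hunks differ from the rest: rather than switching to a method path, they delete the flagged `.map(|a| a.as_ref())` and `.map(|id| id.as_ref())` outright. In the first, `Some(_) | None` matches every Option no matter what the map does to the contents, so the call was dead weight; in the second, the comparison is made directly against what `state.get` returns. The first point in miniature:

fn main() {
    let x: Option<u8> = None;

    // `Some(_) | None` is exhaustive, so this holds for any Option value
    // and for any `.map(...)` applied to it beforehand.
    assert!(matches!(x, Some(_) | None));
    assert!(matches!(x.map(|n| n + 1), Some(_) | None));
}
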
View file

@@ -78,7 +78,7 @@ impl Service {
})
})
.take(limit)
-.filter_map(|r| r.ok()) // Filter out buggy events
+.filter_map(Result::ok) // Filter out buggy events
.filter(|(_, pdu)| {
services()
.rooms
@@ -125,7 +125,7 @@ impl Service {
})
})
.take(limit)
-.filter_map(|r| r.ok()) // Filter out buggy events
+.filter_map(Result::ok) // Filter out buggy events
.filter(|(_, pdu)| {
services()
.rooms

View file

@@ -63,13 +63,13 @@ impl Service {
let mut results = Vec::new();
while let Some(current_room) = {
-while stack.last().map_or(false, |s| s.is_empty()) {
+while stack.last().map_or(false, Vec::is_empty) {
stack.pop();
}
if stack.is_empty() {
None
} else {
-stack.last_mut().and_then(|s| s.pop())
+stack.last_mut().and_then(Vec::pop)
}
} {
rooms_in_path.push(current_room.clone());

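The space-hierarchy walk swaps two closures over a stack of vectors; `Vec::is_empty` and `Vec::pop` slot directly into `map_or` and `and_then` because the Option adapters hand them exactly the reference type they expect. A sketch:

fn main() {
    let mut stack: Vec<Vec<u32>> = vec![vec![7], vec![]];

    // `last()` yields Option<&Vec<u32>>, matching fn(&Vec<u32>) -> bool.
    assert!(stack.last().map_or(false, Vec::is_empty));

    stack.pop(); // drop the empty top entry
    // `last_mut()` yields Option<&mut Vec<u32>>, matching Vec::pop's receiver.
    assert_eq!(stack.last_mut().and_then(Vec::pop), Some(7));
}
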
View file

@@ -136,7 +136,7 @@ impl Service {
.rooms
.state_cache
.room_members(room_id)
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
.filter(|member| member.server_name() == origin);
let visibility = match history_visibility {

View file

@@ -590,7 +590,7 @@ impl Service {
.rooms
.alias
.local_aliases_for_room(&pdu.room_id)
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
.any(|room_alias| aliases.is_match(room_alias.as_str()))
};
@@ -838,7 +838,7 @@ impl Service {
.rooms
.state_cache
.room_members(room_id)
-.filter_map(|m| m.ok())
+.filter_map(Result::ok)
.filter(|m| m.server_name() == server_name)
.filter(|m| m != target)
.count();
@@ -864,7 +864,7 @@ impl Service {
.rooms
.state_cache
.room_members(room_id)
-.filter_map(|m| m.ok())
+.filter_map(Result::ok)
.filter(|m| m.server_name() == server_name)
.filter(|m| m != target)
.count();
@@ -965,7 +965,7 @@ impl Service {
.rooms
.state_cache
.room_servers(room_id)
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
.collect();
// In case we are kicking or banning a user, we need to inform their server of the change

View file

@@ -128,7 +128,7 @@ impl Service {
// Retry requests we could not finish yet
let mut initial_transactions = HashMap::<OutgoingKind, Vec<SendingEventType>>::new();
-for (key, outgoing_kind, event) in self.db.active_requests().filter_map(|r| r.ok()) {
+for (key, outgoing_kind, event) in self.db.active_requests().filter_map(Result::ok) {
let entry = initial_transactions
.entry(outgoing_kind.clone())
.or_default();
@@ -158,7 +158,7 @@ impl Service {
self.db.delete_all_active_requests_for(&outgoing_kind)?;
// Find events that have been added since starting the last request
-let new_events = self.db.queued_requests(&outgoing_kind).filter_map(|r| r.ok()).take(30).collect::<Vec<_>>();
+let new_events = self.db.queued_requests(&outgoing_kind).filter_map(Result::ok).take(30).collect::<Vec<_>>();
if new_events.is_empty() {
current_transaction_status.remove(&outgoing_kind);
@@ -244,7 +244,7 @@ impl Service {
for (_, e) in self
.db
.active_requests_for(outgoing_kind)
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
{
events.push(e);
}
@@ -281,7 +281,7 @@ impl Service {
services()
.users
.keys_changed(room_id.as_ref(), since, None)
-.filter_map(|r| r.ok())
+.filter_map(Result::ok)
.filter(|user_id| user_id.server_name() == services().globals.server_name()),
);

View file

@@ -22,7 +22,7 @@ pub(crate) fn millis_since_unix_epoch() -> u64 {
#[cfg(any(feature = "rocksdb", feature = "sqlite"))]
pub(crate) fn increment(old: Option<&[u8]>) -> Option<Vec<u8>> {
-let number = match old.map(|bytes| bytes.try_into()) {
+let number = match old.map(TryInto::try_into) {
Some(Ok(bytes)) => {
let number = u64::from_be_bytes(bytes);
number + 1
@@ -91,7 +91,7 @@ where
F: Fn(&[u8], &[u8]) -> Ordering,
{
let first_iterator = iterators.next()?;
-let mut other_iterators = iterators.map(|i| i.peekable()).collect::<Vec<_>>();
+let mut other_iterators = iterators.map(Iterator::peekable).collect::<Vec<_>>();
Some(first_iterator.filter(move |target| {
other_iterators.iter_mut().all(|it| {