enable let_underscore_must_use lint

Charles Hall 2024-05-12 17:02:08 -07:00
parent 2ded335adb
commit 052f3088e9
12 changed files with 69 additions and 32 deletions
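For context: `clippy::let_underscore_must_use` fires when a value whose type is `#[must_use]` (most often a `Result`) is discarded with `let _ = ...;`. The first hunk below turns the lint on (presumably in the crate's `[lints.clippy]` table, judging by the neighbouring entries), and the remaining hunks replace each silent discard with either explicit logging or an `expect` that documents why failure is not tolerated. A minimal sketch of the before/after shape, using an illustrative function and message that are not taken from the codebase:

    use tracing::warn;

    fn fallible() -> Result<(), std::io::Error> {
        Ok(())
    }

    fn handle_it() {
        // Before: flagged by clippy::let_underscore_must_use because the
        // Result is silently dropped.
        // let _ = fallible();

        // After, option 1: log the failure and keep going.
        if let Err(error) = fallible() {
            warn!(%error, "fallible() failed");
        }

        // After, option 2: treat failure as a bug and panic with a reason.
        fallible().expect("fallible() should succeed");
    }

    fn main() {
        handle_it();
    }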

View file

@@ -32,6 +32,7 @@ format_push_string = "warn"
get_unwrap = "warn"
if_then_some_else_none = "warn"
impl_trait_in_params = "warn"
let_underscore_must_use = "warn"
lossy_float_literal = "warn"
mem_forget = "warn"
mod_module_files = "warn"

View file

@@ -1400,7 +1400,9 @@ pub(crate) async fn leave_all_rooms(user_id: &UserId) -> Result<()> {
Err(_) => continue,
};
let _ = leave_room(user_id, &room_id, None).await;
if let Err(error) = leave_room(user_id, &room_id, None).await {
warn!(%user_id, %room_id, %error, "failed to leave room");
}
}
Ok(())

View file

@@ -13,6 +13,7 @@ use ruma::{
};
use serde_json::value::to_raw_value;
use std::sync::Arc;
use tracing::warn;
/// # `PUT /_matrix/client/r0/profile/{userId}/displayname`
///
@@ -84,11 +85,14 @@ pub(crate) async fn set_displayname_route(
);
let state_lock = mutex_state.lock().await;
let _ = services()
if let Err(error) = services()
.rooms
.timeline
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)
.await;
.await
{
warn!(%error, "failed to add PDU");
}
}
Ok(set_display_name::v3::Response {})
@@ -198,11 +202,14 @@ pub(crate) async fn set_avatar_url_route(
);
let state_lock = mutex_state.lock().await;
let _ = services()
if let Err(error) = services()
.rooms
.timeline
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)
.await;
.await
{
warn!(%error, "failed to add PDU");
};
}
Ok(set_avatar_url::v3::Response {})

View file

@@ -480,7 +480,11 @@ pub(crate) async fn create_room_route(
// 8. Events implied by invite (and TODO: invite_3pid)
drop(state_lock);
for user_id in &body.invite {
let _ = invite_helper(sender_user, user_id, &room_id, None, body.is_direct).await;
if let Err(error) =
invite_helper(sender_user, user_id, &room_id, None, body.is_direct).await
{
warn!(%error, "invite helper failed");
};
}
// Homeserver specific stuff

View file

@@ -29,7 +29,7 @@ use std::{
time::Duration,
};
use tokio::sync::watch::Sender;
use tracing::{error, info};
use tracing::{debug, error, info};
/// # `GET /_matrix/client/r0/sync`
///
@@ -164,7 +164,8 @@ async fn sync_helper_wrapper(
}
}
let _ = tx.send(Some(r.map(|(r, _)| r)));
tx.send(Some(r.map(|(r, _)| r)))
.expect("receiver should not be dropped");
}
async fn sync_helper(
@@ -543,7 +544,10 @@ async fn sync_helper(
if duration.as_secs() > 30 {
duration = Duration::from_secs(30);
}
let _ = tokio::time::timeout(duration, watcher).await;
match tokio::time::timeout(duration, watcher).await {
Ok(x) => x.expect("watcher should succeed"),
Err(error) => debug!(%error, "timed out"),
};
Ok((response, false))
} else {
Ok((response, since != next_batch)) // Only cache if we made progress
@@ -1681,7 +1685,10 @@ pub(crate) async fn sync_events_v4_route(
if duration.as_secs() > 30 {
duration = Duration::from_secs(30);
}
let _ = tokio::time::timeout(duration, watcher).await;
match tokio::time::timeout(duration, watcher).await {
Ok(x) => x.expect("watcher should succeed"),
Err(error) => debug!(%error, "timed out"),
};
}
Ok(sync_events::v4::Response {

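The two sync hunks above now distinguish the watcher completing from the long-poll expiring: `tokio::time::timeout` returns `Ok(inner)` when the wrapped future finishes in time and `Err(Elapsed)` otherwise, and only the latter is the routine "nothing happened before the deadline" case that gets logged at debug level. A minimal sketch of that shape (the watch channel and duration here are stand-ins, not the server's actual watcher):

    use std::time::Duration;
    use tokio::sync::watch;

    #[tokio::main]
    async fn main() {
        let (_tx, mut rx) = watch::channel(());
        match tokio::time::timeout(Duration::from_millis(50), rx.changed()).await {
            // The inner future resolved in time; its own Result still has to
            // be handled.
            Ok(changed) => changed.expect("watch sender should not be dropped"),
            // The timeout elapsed first; in the sync handlers this is routine
            // and only worth a debug message.
            Err(elapsed) => eprintln!("timed out: {elapsed}"),
        }
    }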
View file

@@ -96,9 +96,9 @@ impl KeyValueDatabaseEngine for Arc<Engine> {
fn open_tree(&self, name: &'static str) -> Result<Arc<dyn KvTree>> {
if !self.old_cfs.contains(&name.to_owned()) {
// Create if it didn't exist
let _ = self
.rocks
.create_cf(name, &db_options(self.max_open_files, &self.cache));
self.rocks
.create_cf(name, &db_options(self.max_open_files, &self.cache))
.expect("should be able to create column family");
}
Ok(Arc::new(RocksDbEngineTree {

View file

@@ -47,7 +47,7 @@ impl Watchers {
let mut watchers = self.watchers.write().unwrap();
for prefix in triggered {
if let Some(tx) = watchers.remove(prefix) {
let _ = tx.0.send(());
tx.0.send(()).expect("channel should still be open");
}
}
};

View file

@@ -227,7 +227,8 @@ async fn run_server() -> io::Result<()> {
let server = bind_rustls(addr, conf).handle(handle).serve(app);
#[cfg(feature = "systemd")]
let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready]);
sd_notify::notify(true, &[sd_notify::NotifyState::Ready])
.expect("should be able to notify systemd");
server.await?
}
@@ -235,7 +236,8 @@ async fn run_server() -> io::Result<()> {
let server = bind(addr).handle(handle).serve(app);
#[cfg(feature = "systemd")]
let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready]);
sd_notify::notify(true, &[sd_notify::NotifyState::Ready])
.expect("should be able to notify systemd");
server.await?
}
@@ -494,7 +496,8 @@ async fn shutdown_signal(handle: ServerHandle) {
services().globals.shutdown();
#[cfg(feature = "systemd")]
let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]);
sd_notify::notify(true, &[sd_notify::NotifyState::Stopping])
.expect("should be able to notify systemd");
}
async fn federation_disabled(_: Uri) -> impl IntoResponse {

View file

@@ -29,6 +29,7 @@ use ruma::{
};
use serde_json::value::to_raw_value;
use tokio::sync::{mpsc, Mutex, RwLock};
use tracing::warn;
use crate::{
api::client_server::{leave_all_rooms, AUTO_GEN_PASSWORD_LENGTH},
@@ -784,7 +785,9 @@ impl Service {
if leave_rooms {
for &user_id in &user_ids {
let _ = leave_all_rooms(user_id).await;
if let Err(error) = leave_all_rooms(user_id).await {
warn!(%user_id, %error, "failed to leave one or more rooms");
}
}
}

View file

@@ -92,12 +92,12 @@ impl RotationHandler {
let mut r = self.0.subscribe();
async move {
let _ = r.recv().await;
r.recv().await.expect("should receive a message");
}
}
pub(crate) fn fire(&self) {
let _ = self.0.send(());
self.0.send(()).expect("should be able to send message");
}
}

View file

@@ -1,6 +1,7 @@
use ruma::{events::SyncEphemeralRoomEvent, OwnedRoomId, OwnedUserId, RoomId, UserId};
use std::collections::BTreeMap;
use tokio::sync::{broadcast, RwLock};
use tracing::trace;
use crate::{services, utils, Result};
@@ -29,7 +30,9 @@ impl Service {
.write()
.await
.insert(room_id.to_owned(), services().globals.next_count()?);
let _ = self.typing_update_sender.send(room_id.to_owned());
if self.typing_update_sender.send(room_id.to_owned()).is_err() {
trace!("receiver found what it was looking for and is no longer interested");
}
Ok(())
}
@@ -45,7 +48,9 @@ impl Service {
.write()
.await
.insert(room_id.to_owned(), services().globals.next_count()?);
let _ = self.typing_update_sender.send(room_id.to_owned());
if self.typing_update_sender.send(room_id.to_owned()).is_err() {
trace!("receiver found what it was looking for and is no longer interested");
}
Ok(())
}
@@ -86,7 +91,9 @@ impl Service {
.write()
.await
.insert(room_id.to_owned(), services().globals.next_count()?);
let _ = self.typing_update_sender.send(room_id.to_owned());
if self.typing_update_sender.send(room_id.to_owned()).is_err() {
trace!("receiver found what it was looking for and is no longer interested");
}
}
Ok(())
}
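The typing-service hunks above treat a send error as routine rather than discarding it: assuming `typing_update_sender` is a `tokio::sync::broadcast` sender, `send` only returns `Err` when no receiver is currently subscribed, which is exactly the "nobody is long-polling right now" situation the new `trace!` message describes. A minimal sketch of that behaviour (names and messages are illustrative):

    use tokio::sync::broadcast;

    fn notify_typing(sender: &broadcast::Sender<String>, room_id: &str) {
        // broadcast::Sender::send fails only if there are zero active
        // receivers; the value is simply dropped, so a trace-level note
        // is enough.
        if sender.send(room_id.to_owned()).is_err() {
            println!("no receiver is currently listening for typing updates");
        }
    }

    fn main() {
        let (tx, rx) = broadcast::channel(16);
        drop(rx); // no subscribers left
        notify_typing(&tx, "!room:example.org");
    }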

View file

@@ -1533,19 +1533,22 @@ impl Service {
// Try to fetch keys, failure is okay
// Servers we couldn't find in the cache will be added to `servers`
for pdu in &event.room_state.state {
let _ = self
for pdu in event
.room_state
.state
.iter()
.chain(&event.room_state.auth_chain)
{
if let Err(error) = self
.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm)
.await;
}
for pdu in &event.room_state.auth_chain {
let _ = self
.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm)
.await;
.await
{
debug!(%error, "failed to get server keys from cache");
};
}
drop(pkm);
}
};
if servers.is_empty() {
info!("We had all keys locally");
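Beyond the error handling, the last hunk also folds the two key-fetch loops into one by chaining the state and auth-chain slices, so the cache lookup and its new logging live in a single place. A minimal sketch of the `Iterator::chain` shape used there (the data here is made up):

    fn main() {
        let state = vec!["state_pdu_1", "state_pdu_2"];
        let auth_chain = vec!["auth_pdu_1"];

        // chain() yields every item of the first iterator, then every item of
        // the second, so one loop body covers both collections.
        for pdu in state.iter().chain(auth_chain.iter()) {
            println!("fetching keys for {pdu}");
        }
    }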