enable dead_code lint

Charles Hall, 2024-05-21 21:58:43 -07:00
commit d7e945f4c5, parent 518d0c9cf3
GPG key ID: 7B8E0645816E07CF (no known key found for this signature in database)
17 changed files with 15 additions and 137 deletions

@@ -14,7 +14,6 @@ unused_import_braces = "warn"
unused_lifetimes = "warn"
unused_macro_rules = "warn"
dead_code = "allow"
unused_qualifications = "allow"
[workspace.lints.clippy]
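
Removing `dead_code = "allow"` from `[workspace.lints.rust]` lets the lint fall back to its default `warn` level across the workspace; the rest of the commit either deletes the flagged items or opts them out one at a time. A minimal, hypothetical sketch of that per-item pattern (the item name is invented for illustration, not taken from this repository):

    // With `dead_code` back at its default `warn` level, an intentionally
    // unused item gets a targeted allow plus a TODO explaining why, instead
    // of a workspace-wide exemption.
    #[allow(dead_code)] // TODO: remove this once a caller exists
    fn planned_but_unused() {}

    fn main() {}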

@@ -1170,17 +1170,6 @@ impl KeyValueDatabase {
Ok(())
}
#[tracing::instrument(skip(self))]
pub(crate) fn flush(&self) -> Result<()> {
let start = std::time::Instant::now();
let res = self.db.flush();
debug!("flush: took {:?}", start.elapsed());
res
}
#[tracing::instrument]
pub(crate) fn start_cleanup_task() {
use std::time::{Duration, Instant};

@@ -1,6 +1,5 @@
use std::{future::Future, pin::Pin, sync::Arc};
use super::Config;
use crate::Result;
#[cfg(feature = "sqlite")]
@@ -13,11 +12,11 @@ pub(crate) mod rocksdb;
pub(crate) mod watchers;
pub(crate) trait KeyValueDatabaseEngine: Send + Sync {
fn open(config: &Config) -> Result<Self>
#[cfg(any(feature = "sqlite", feature = "rocksdb"))]
fn open(config: &super::Config) -> Result<Self>
where
Self: Sized;
fn open_tree(&self, name: &'static str) -> Result<Arc<dyn KvTree>>;
fn flush(&self) -> Result<()>;
fn cleanup(&self) -> Result<()> {
Ok(())
}
@@ -25,7 +24,6 @@ pub(crate) trait KeyValueDatabaseEngine: Send + Sync {
Ok("Current database engine does not support memory usage reporting."
.to_owned())
}
fn clear_caches(&self) {}
}
pub(crate) trait KvTree: Send + Sync {

@@ -139,11 +139,6 @@ impl KeyValueDatabaseEngine for Arc<Engine> {
}))
}
fn flush(&self) -> Result<()> {
// TODO?
Ok(())
}
#[allow(clippy::as_conversions, clippy::cast_precision_loss)]
fn memory_usage(&self) -> Result<String> {
let stats =
@@ -161,8 +156,6 @@ impl KeyValueDatabaseEngine for Arc<Engine> {
self.cache.get_pinned_usage() as f64 / 1024.0 / 1024.0,
))
}
fn clear_caches(&self) {}
}
impl RocksDbEngineTree<'_> {

@@ -164,11 +164,6 @@ impl KeyValueDatabaseEngine for Arc<Engine> {
}))
}
fn flush(&self) -> Result<()> {
// we enabled PRAGMA synchronous=normal, so this should not be necessary
Ok(())
}
fn cleanup(&self) -> Result<()> {
self.flush_wal()
}

@@ -1,6 +1,6 @@
use ruma::{CanonicalJsonObject, EventId};
use crate::{database::KeyValueDatabase, service, Error, PduEvent, Result};
use crate::{database::KeyValueDatabase, service, Error, Result};
impl service::rooms::outlier::Data for KeyValueDatabase {
fn get_outlier_pdu_json(
@@ -16,16 +16,6 @@ impl service::rooms::outlier::Data for KeyValueDatabase {
)
}
fn get_outlier_pdu(&self, event_id: &EventId) -> Result<Option<PduEvent>> {
self.eventid_outlierpdu.get(event_id.as_bytes())?.map_or(
Ok(None),
|pdu| {
serde_json::from_slice(&pdu)
.map_err(|_| Error::bad_database("Invalid PDU in db."))
},
)
}
#[tracing::instrument(skip(self, pdu))]
fn add_pdu_outlier(
&self,

@@ -377,39 +377,6 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
.transpose()
}
/// Returns an iterator over all User IDs who ever joined a room.
#[tracing::instrument(skip(self))]
fn room_useroncejoined<'a>(
&'a self,
room_id: &RoomId,
) -> Box<dyn Iterator<Item = Result<OwnedUserId>> + 'a> {
let mut prefix = room_id.as_bytes().to_vec();
prefix.push(0xFF);
Box::new(self.roomuseroncejoinedids.scan_prefix(prefix).map(
|(key, _)| {
UserId::parse(
utils::string_from_bytes(
key.rsplit(|&b| b == 0xFF)
.next()
.expect("rsplit always returns an element"),
)
.map_err(|_| {
Error::bad_database(
"User ID in room_useroncejoined is invalid \
unicode.",
)
})?,
)
.map_err(|_| {
Error::bad_database(
"User ID in room_useroncejoined is invalid.",
)
})
},
))
}
/// Returns an iterator over all invited members of a room.
#[tracing::instrument(skip(self))]
fn room_members_invited<'a>(

@@ -48,23 +48,6 @@ impl service::sending::Data for KeyValueDatabase {
Ok(())
}
fn delete_all_requests_for(
&self,
outgoing_kind: &OutgoingKind,
) -> Result<()> {
let prefix = outgoing_kind.get_prefix();
for (key, _) in self.servercurrentevent_data.scan_prefix(prefix.clone())
{
self.servercurrentevent_data.remove(&key).unwrap();
}
for (key, _) in self.servernameevent_data.scan_prefix(prefix) {
self.servernameevent_data.remove(&key).unwrap();
}
Ok(())
}
fn queue_requests(
&self,
requests: &[(&OutgoingKind, SendingEventType)],

@@ -406,19 +406,6 @@ impl service::users::Data for KeyValueDatabase {
Ok(())
}
fn last_one_time_keys_update(&self, user_id: &UserId) -> Result<u64> {
self.userid_lastonetimekeyupdate.get(user_id.as_bytes())?.map_or(
Ok(0),
|bytes| {
utils::u64_from_bytes(&bytes).map_err(|_| {
Error::bad_database(
"Count in roomid_lastroomactiveupdate is invalid.",
)
})
},
)
}
fn take_one_time_key(
&self,
user_id: &UserId,

@@ -83,7 +83,10 @@ pub(crate) struct Service {
/// the database.
pub(crate) struct RotationHandler(
broadcast::Sender<()>,
broadcast::Receiver<()>,
// TODO: Determine if it's safe to delete this field. I'm not deleting it
// right now because I'm unsure what implications that would have for how
// the sender expects to work.
#[allow(dead_code)] broadcast::Receiver<()>,
);
impl RotationHandler {
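
The TODO above hinges on how `tokio::sync::broadcast` behaves when no receiver exists; here is a minimal sketch of that behavior (assuming the `broadcast` types in this struct come from tokio, which this hunk does not show):

    use tokio::sync::broadcast;

    fn main() {
        // `Sender::send` fails only when no Receiver is alive, so holding the
        // otherwise-unused Receiver keeps `send` from erroring before any
        // task has called `subscribe`.
        let (tx, rx) = broadcast::channel::<()>(1);
        assert!(tx.send(()).is_ok()); // the initial `rx` is still alive
        drop(rx);
        assert!(tx.send(()).is_err()); // no receivers left: SendError
        let _late = tx.subscribe(); // new receivers can still be created later
        assert!(tx.send(()).is_ok());
    }

If every call site already tolerates a `SendError`, the field could probably go; keeping it behind `#[allow(dead_code)]` is the conservative choice the TODO describes.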

@@ -39,6 +39,8 @@ pub(crate) trait Data: Send + Sync {
) -> Result<()>;
/// Returns the private read marker.
// TODO: Implement MSC2285
#[allow(dead_code)]
fn private_read_get(
&self,
room_id: &RoomId,
@@ -46,6 +48,8 @@ pub(crate) trait Data: Send + Sync {
) -> Result<Option<u64>>;
/// Returns the count of the last private read receipt update in this room.
// TODO: Implement MSC2285
#[allow(dead_code)]
fn last_privateread_update(
&self,
user_id: &UserId,

@@ -1,6 +1,6 @@
use ruma::{CanonicalJsonObject, EventId};
use crate::{PduEvent, Result};
use crate::Result;
pub(crate) trait Data: Send + Sync {
/// Returns the pdu from the outlier tree.
@@ -8,7 +8,7 @@ pub(crate) trait Data: Send + Sync {
&self,
event_id: &EventId,
) -> Result<Option<CanonicalJsonObject>>;
fn get_outlier_pdu(&self, event_id: &EventId) -> Result<Option<PduEvent>>;
/// Append the PDU as an outlier.
fn add_pdu_outlier(
&self,

@@ -313,15 +313,6 @@ impl Service {
self.db.room_invited_count(room_id)
}
/// Returns an iterator over all User IDs who ever joined a room.
#[tracing::instrument(skip(self))]
pub(crate) fn room_useroncejoined<'a>(
&'a self,
room_id: &RoomId,
) -> impl Iterator<Item = Result<OwnedUserId>> + 'a {
self.db.room_useroncejoined(room_id)
}
/// Returns an iterator over all invited members of a room.
#[tracing::instrument(skip(self))]
pub(crate) fn room_members_invited<'a>(

@@ -68,12 +68,6 @@ pub(crate) trait Data: Send + Sync {
fn room_invited_count(&self, room_id: &RoomId) -> Result<Option<u64>>;
/// Returns an iterator over all User IDs who ever joined a room.
fn room_useroncejoined<'a>(
&'a self,
room_id: &RoomId,
) -> Box<dyn Iterator<Item = Result<OwnedUserId>> + 'a>;
/// Returns an iterator over all invited members of a room.
fn room_members_invited<'a>(
&'a self,
@@ -137,5 +131,7 @@ pub(crate) trait Data: Send + Sync {
fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result<bool>;
// TODO: Use this when implementing sync filtering
#[allow(dead_code)]
fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result<bool>;
}

@@ -588,17 +588,6 @@ impl Service {
Ok(())
}
/// Cleanup event data
/// Used for instance after we remove an appservice registration
#[tracing::instrument(skip(self))]
pub(crate) fn cleanup_events(&self, appservice_id: String) -> Result<()> {
self.db.delete_all_requests_for(&OutgoingKind::Appservice(
appservice_id,
))?;
Ok(())
}
#[tracing::instrument(skip(events))]
async fn handle_events(
kind: OutgoingKind,

@@ -20,10 +20,6 @@ pub(crate) trait Data: Send + Sync {
&self,
outgoing_kind: &OutgoingKind,
) -> Result<()>;
fn delete_all_requests_for(
&self,
outgoing_kind: &OutgoingKind,
) -> Result<()>;
fn queue_requests(
&self,
requests: &[(&OutgoingKind, SendingEventType)],

@@ -116,8 +116,6 @@ pub(crate) trait Data: Send + Sync {
one_time_key_value: &Raw<OneTimeKey>,
) -> Result<()>;
fn last_one_time_keys_update(&self, user_id: &UserId) -> Result<u64>;
fn take_one_time_key(
&self,
user_id: &UserId,