Repository: https://gitlab.computer.surgery/matrix/grapevine.git
Commit: d7e945f4c5 (parent: 518d0c9cf3)

enable dead_code lint

17 changed files with 15 additions and 137 deletions

@@ -14,7 +14,6 @@ unused_import_braces = "warn"
 unused_lifetimes = "warn"
 unused_macro_rules = "warn"
 
-dead_code = "allow"
 unused_qualifications = "allow"
 
 [workspace.lints.clippy]

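With the workspace-wide `dead_code = "allow"` override gone, rustc's default dead-code warnings apply again to every crate that inherits the workspace lints, and intentionally unused items have to be allowed one at a time, which is what the remaining hunks do. A minimal standalone sketch of that behaviour (all names here are made up for illustration):

    // With the blanket allow removed, the compiler warns about unused items
    // again unless each one is silenced explicitly.
    struct Handler;

    impl Handler {
        // Called from main(), so no warning.
        fn run(&self) {}

        // Never called: rustc now reports "method `idle` is never used".
        fn idle(&self) {}

        // Deliberately kept; the per-item allow documents that decision.
        #[allow(dead_code)]
        fn planned_feature(&self) {}
    }

    fn main() {
        Handler.run();
    }
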
@@ -1170,17 +1170,6 @@ impl KeyValueDatabase {
         Ok(())
     }
 
-    #[tracing::instrument(skip(self))]
-    pub(crate) fn flush(&self) -> Result<()> {
-        let start = std::time::Instant::now();
-
-        let res = self.db.flush();
-
-        debug!("flush: took {:?}", start.elapsed());
-
-        res
-    }
-
     #[tracing::instrument]
     pub(crate) fn start_cleanup_task() {
         use std::time::{Duration, Instant};

@@ -1,6 +1,5 @@
 use std::{future::Future, pin::Pin, sync::Arc};
 
-use super::Config;
 use crate::Result;
 
 #[cfg(feature = "sqlite")]

@@ -13,11 +12,11 @@ pub(crate) mod rocksdb;
 pub(crate) mod watchers;
 
 pub(crate) trait KeyValueDatabaseEngine: Send + Sync {
-    fn open(config: &Config) -> Result<Self>
+    #[cfg(any(feature = "sqlite", feature = "rocksdb"))]
+    fn open(config: &super::Config) -> Result<Self>
     where
         Self: Sized;
     fn open_tree(&self, name: &'static str) -> Result<Arc<dyn KvTree>>;
-    fn flush(&self) -> Result<()>;
     fn cleanup(&self) -> Result<()> {
         Ok(())
     }

@@ -25,7 +24,6 @@ pub(crate) trait KeyValueDatabaseEngine: Send + Sync {
         Ok("Current database engine does not support memory usage reporting."
             .to_owned())
     }
-    fn clear_caches(&self) {}
 }
 
 pub(crate) trait KvTree: Send + Sync {

@@ -139,11 +139,6 @@ impl KeyValueDatabaseEngine for Arc<Engine> {
         }))
     }
 
-    fn flush(&self) -> Result<()> {
-        // TODO?
-        Ok(())
-    }
-
     #[allow(clippy::as_conversions, clippy::cast_precision_loss)]
     fn memory_usage(&self) -> Result<String> {
         let stats =

@@ -161,8 +156,6 @@ impl KeyValueDatabaseEngine for Arc<Engine> {
             self.cache.get_pinned_usage() as f64 / 1024.0 / 1024.0,
         ))
     }
-
-    fn clear_caches(&self) {}
 }
 
 impl RocksDbEngineTree<'_> {

@@ -164,11 +164,6 @@ impl KeyValueDatabaseEngine for Arc<Engine> {
         }))
     }
 
-    fn flush(&self) -> Result<()> {
-        // we enabled PRAGMA synchronous=normal, so this should not be necessary
-        Ok(())
-    }
-
     fn cleanup(&self) -> Result<()> {
         self.flush_wal()
    }

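The flush() removed here was a no-op by design: as its comment notes, the SQLite backend runs with PRAGMA synchronous=normal, so durability is handled by WAL checkpoints rather than per-call syncs. A rough sketch of that kind of connection setup, assuming rusqlite; this is illustrative, not grapevine's actual initialization code:

    use rusqlite::Connection;

    // Hypothetical helper; the real connection setup lives elsewhere.
    fn open_conn(path: &str) -> rusqlite::Result<Connection> {
        let conn = Connection::open(path)?;
        // WAL journaling plus synchronous=NORMAL means SQLite only syncs at
        // WAL checkpoints, so an extra flush() hook has nothing to add.
        conn.pragma_update(None, "journal_mode", "WAL")?;
        conn.pragma_update(None, "synchronous", "NORMAL")?;
        Ok(conn)
    }

    fn main() -> rusqlite::Result<()> {
        let conn = open_conn("example.db")?;
        conn.execute_batch("CREATE TABLE IF NOT EXISTS kv (k TEXT PRIMARY KEY, v BLOB);")?;
        Ok(())
    }
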
@@ -1,6 +1,6 @@
 use ruma::{CanonicalJsonObject, EventId};
 
-use crate::{database::KeyValueDatabase, service, Error, PduEvent, Result};
+use crate::{database::KeyValueDatabase, service, Error, Result};
 
 impl service::rooms::outlier::Data for KeyValueDatabase {
     fn get_outlier_pdu_json(

@@ -16,16 +16,6 @@ impl service::rooms::outlier::Data for KeyValueDatabase {
         )
     }
 
-    fn get_outlier_pdu(&self, event_id: &EventId) -> Result<Option<PduEvent>> {
-        self.eventid_outlierpdu.get(event_id.as_bytes())?.map_or(
-            Ok(None),
-            |pdu| {
-                serde_json::from_slice(&pdu)
-                    .map_err(|_| Error::bad_database("Invalid PDU in db."))
-            },
-        )
-    }
-
     #[tracing::instrument(skip(self, pdu))]
     fn add_pdu_outlier(
         &self,

@@ -377,39 +377,6 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
             .transpose()
     }
 
-    /// Returns an iterator over all User IDs who ever joined a room.
-    #[tracing::instrument(skip(self))]
-    fn room_useroncejoined<'a>(
-        &'a self,
-        room_id: &RoomId,
-    ) -> Box<dyn Iterator<Item = Result<OwnedUserId>> + 'a> {
-        let mut prefix = room_id.as_bytes().to_vec();
-        prefix.push(0xFF);
-
-        Box::new(self.roomuseroncejoinedids.scan_prefix(prefix).map(
-            |(key, _)| {
-                UserId::parse(
-                    utils::string_from_bytes(
-                        key.rsplit(|&b| b == 0xFF)
-                            .next()
-                            .expect("rsplit always returns an element"),
-                    )
-                    .map_err(|_| {
-                        Error::bad_database(
-                            "User ID in room_useroncejoined is invalid \
-                             unicode.",
-                        )
-                    })?,
-                )
-                .map_err(|_| {
-                    Error::bad_database(
-                        "User ID in room_useroncejoined is invalid.",
-                    )
-                })
-            },
-        ))
-    }
-
     /// Returns an iterator over all invited members of a room.
     #[tracing::instrument(skip(self))]
     fn room_members_invited<'a>(

@@ -48,23 +48,6 @@ impl service::sending::Data for KeyValueDatabase {
         Ok(())
     }
 
-    fn delete_all_requests_for(
-        &self,
-        outgoing_kind: &OutgoingKind,
-    ) -> Result<()> {
-        let prefix = outgoing_kind.get_prefix();
-        for (key, _) in self.servercurrentevent_data.scan_prefix(prefix.clone())
-        {
-            self.servercurrentevent_data.remove(&key).unwrap();
-        }
-
-        for (key, _) in self.servernameevent_data.scan_prefix(prefix) {
-            self.servernameevent_data.remove(&key).unwrap();
-        }
-
-        Ok(())
-    }
-
     fn queue_requests(
         &self,
         requests: &[(&OutgoingKind, SendingEventType)],

@@ -406,19 +406,6 @@ impl service::users::Data for KeyValueDatabase {
         Ok(())
     }
 
-    fn last_one_time_keys_update(&self, user_id: &UserId) -> Result<u64> {
-        self.userid_lastonetimekeyupdate.get(user_id.as_bytes())?.map_or(
-            Ok(0),
-            |bytes| {
-                utils::u64_from_bytes(&bytes).map_err(|_| {
-                    Error::bad_database(
-                        "Count in roomid_lastroomactiveupdate is invalid.",
-                    )
-                })
-            },
-        )
-    }
-
     fn take_one_time_key(
         &self,
         user_id: &UserId,

@@ -83,7 +83,10 @@ pub(crate) struct Service {
 /// the database.
 pub(crate) struct RotationHandler(
     broadcast::Sender<()>,
-    broadcast::Receiver<()>,
+    // TODO: Determine if it's safe to delete this field. I'm not deleting it
+    // right now because I'm unsure what implications that would have for how
+    // the sender expects to work.
+    #[allow(dead_code)] broadcast::Receiver<()>,
 );
 
 impl RotationHandler {

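The TODO above concerns whether the held Receiver can be dropped entirely. Assuming this is tokio's broadcast channel (which the `broadcast::Sender<()>` field suggests), new receivers can always be created from the sender, but `send()` fails whenever no receiver exists at all; keeping one receiver alive in the struct rules that case out. A small sketch of that trade-off, with made-up variable names:

    use tokio::sync::broadcast;

    fn main() {
        // One receiver held for the lifetime of the handler, mirroring the
        // field kept (and now #[allow(dead_code)]-annotated) above.
        let (tx, _keepalive_rx) = broadcast::channel::<()>(1);

        // Late subscribers are created from the sender, not from the stored
        // receiver.
        let mut rx = tx.subscribe();

        // send() only errors when no receiver is alive, which the held
        // receiver prevents.
        tx.send(()).expect("at least one receiver is alive");
        assert!(rx.try_recv().is_ok());
    }
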
@@ -39,6 +39,8 @@ pub(crate) trait Data: Send + Sync {
     ) -> Result<()>;
 
     /// Returns the private read marker.
+    // TODO: Implement MSC2285
+    #[allow(dead_code)]
     fn private_read_get(
         &self,
         room_id: &RoomId,

@@ -46,6 +48,8 @@ pub(crate) trait Data: Send + Sync {
     ) -> Result<Option<u64>>;
 
     /// Returns the count of the last typing update in this room.
+    // TODO: Implement MSC2285
+    #[allow(dead_code)]
     fn last_privateread_update(
         &self,
         user_id: &UserId,

@@ -1,6 +1,6 @@
 use ruma::{CanonicalJsonObject, EventId};
 
-use crate::{PduEvent, Result};
+use crate::Result;
 
 pub(crate) trait Data: Send + Sync {
     /// Returns the pdu from the outlier tree.

@@ -8,7 +8,7 @@ pub(crate) trait Data: Send + Sync {
         &self,
         event_id: &EventId,
     ) -> Result<Option<CanonicalJsonObject>>;
-    fn get_outlier_pdu(&self, event_id: &EventId) -> Result<Option<PduEvent>>;
+
     /// Append the PDU as an outlier.
     fn add_pdu_outlier(
         &self,

@@ -313,15 +313,6 @@ impl Service {
         self.db.room_invited_count(room_id)
     }
 
-    /// Returns an iterator over all User IDs who ever joined a room.
-    #[tracing::instrument(skip(self))]
-    pub(crate) fn room_useroncejoined<'a>(
-        &'a self,
-        room_id: &RoomId,
-    ) -> impl Iterator<Item = Result<OwnedUserId>> + 'a {
-        self.db.room_useroncejoined(room_id)
-    }
-
     /// Returns an iterator over all invited members of a room.
     #[tracing::instrument(skip(self))]
     pub(crate) fn room_members_invited<'a>(

@@ -68,12 +68,6 @@ pub(crate) trait Data: Send + Sync {
 
     fn room_invited_count(&self, room_id: &RoomId) -> Result<Option<u64>>;
 
-    /// Returns an iterator over all User IDs who ever joined a room.
-    fn room_useroncejoined<'a>(
-        &'a self,
-        room_id: &RoomId,
-    ) -> Box<dyn Iterator<Item = Result<OwnedUserId>> + 'a>;
-
     /// Returns an iterator over all invited members of a room.
     fn room_members_invited<'a>(
         &'a self,

@@ -137,5 +131,7 @@ pub(crate) trait Data: Send + Sync {
 
     fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result<bool>;
 
+    // TODO: Use this when implementing sync filtering
+    #[allow(dead_code)]
     fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result<bool>;
 }

@@ -588,17 +588,6 @@ impl Service {
         Ok(())
     }
 
-    /// Cleanup event data
-    /// Used for instance after we remove an appservice registration
-    #[tracing::instrument(skip(self))]
-    pub(crate) fn cleanup_events(&self, appservice_id: String) -> Result<()> {
-        self.db.delete_all_requests_for(&OutgoingKind::Appservice(
-            appservice_id,
-        ))?;
-
-        Ok(())
-    }
-
     #[tracing::instrument(skip(events))]
     async fn handle_events(
         kind: OutgoingKind,

@@ -20,10 +20,6 @@ pub(crate) trait Data: Send + Sync {
         &self,
         outgoing_kind: &OutgoingKind,
     ) -> Result<()>;
-    fn delete_all_requests_for(
-        &self,
-        outgoing_kind: &OutgoingKind,
-    ) -> Result<()>;
     fn queue_requests(
         &self,
         requests: &[(&OutgoingKind, SendingEventType)],

@@ -116,8 +116,6 @@ pub(crate) trait Data: Send + Sync {
         one_time_key_value: &Raw<OneTimeKey>,
     ) -> Result<()>;
 
-    fn last_one_time_keys_update(&self, user_id: &UserId) -> Result<u64>;
-
     fn take_one_time_key(
         &self,
         user_id: &UserId,