Mirror of https://gitlab.computer.surgery/matrix/grapevine.git, synced 2025-12-17 07:41:23 +01:00
Remove unused cache
commit 62bff27d50 (parent 6e2eec012f)
2 changed files with 3 additions and 13 deletions
First changed file (edits within `impl Services`):

@@ -124,7 +124,6 @@ impl Services {
             },
             timeline: rooms::timeline::Service {
                 db,
-                lasttimelinecount_cache: Mutex::new(HashMap::new()),
             },
             threads: rooms::threads::Service {
                 db,
@@ -173,8 +172,6 @@ impl Services {
             .len();
         let stateinfo_cache =
             self.rooms.state_compressor.stateinfo_cache.lock().unwrap().len();
-        let lasttimelinecount_cache =
-            self.rooms.timeline.lasttimelinecount_cache.lock().await.len();
         let roomid_spacechunk_cache =
            self.rooms.spaces.roomid_spacechunk_cache.lock().await.len();
 
@@ -184,7 +181,6 @@ lazy_load_waiting: {lazy_load_waiting}
 server_visibility_cache: {server_visibility_cache}
 user_visibility_cache: {user_visibility_cache}
 stateinfo_cache: {stateinfo_cache}
-lasttimelinecount_cache: {lasttimelinecount_cache}
 roomid_spacechunk_cache: {roomid_spacechunk_cache}"
         )
     }
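The two hunks above remove the cache from the server's memory-usage report, which takes each cache's lock, reads its entry count with `len()`, and interpolates the counts into one multi-line string. A minimal, self-contained sketch of that reporting shape (the function signature and names here are illustrative, not grapevine's API):

use std::collections::HashMap;
use std::sync::Mutex;

// Sketch: take each lock briefly, read the entry count, and build the
// same kind of "name: count" report the hunks above are editing.
fn memory_usage(caches: &[(&str, &Mutex<HashMap<String, u64>>)]) -> String {
    caches
        .iter()
        .map(|(name, cache)| format!("{name}: {}\n", cache.lock().unwrap().len()))
        .collect()
}

fn main() {
    let stateinfo = Mutex::new(HashMap::from([("!room:example.org".to_owned(), 1)]));
    println!("{}", memory_usage(&[("stateinfo_cache", &stateinfo)]));
}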
@ -212,9 +208,6 @@ roomid_spacechunk_cache: {roomid_spacechunk_cache}"
|
||||||
if amount > 3 {
|
if amount > 3 {
|
||||||
self.rooms.state_compressor.stateinfo_cache.lock().unwrap().clear();
|
self.rooms.state_compressor.stateinfo_cache.lock().unwrap().clear();
|
||||||
}
|
}
|
||||||
if amount > 4 {
|
|
||||||
self.rooms.timeline.lasttimelinecount_cache.lock().await.clear();
|
|
||||||
}
|
|
||||||
if amount > 5 {
|
if amount > 5 {
|
||||||
self.rooms.spaces.roomid_spacechunk_cache.lock().await.clear();
|
self.rooms.spaces.roomid_spacechunk_cache.lock().await.clear();
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
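This hunk removes the cache's tier from the staged clear operation: each threshold on `amount` flushes one more cache, and the surviving tiers keep their original thresholds (`> 3`, `> 5`) rather than being renumbered. A self-contained sketch of the tiered pattern, under assumed, illustrative names:

use std::collections::HashMap;
use std::sync::Mutex;

struct Caches {
    cheap: Mutex<HashMap<String, u64>>,
    expensive: Mutex<HashMap<String, Vec<u8>>>,
}

impl Caches {
    // Higher `amount` flushes progressively more caches; a tier can be
    // deleted (as the hunk above does) without renumbering the others.
    fn clear_caches(&self, amount: u32) {
        if amount > 0 {
            self.cheap.lock().unwrap().clear();
        }
        if amount > 1 {
            self.expensive.lock().unwrap().clear();
        }
    }
}

fn main() {
    let caches = Caches {
        cheap: Mutex::new(HashMap::new()),
        expensive: Mutex::new(HashMap::new()),
    };
    caches.clear_caches(2); // passes both thresholds, flushes both caches
}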
Second changed file (the `rooms::timeline` service module):

@@ -2,7 +2,7 @@ mod data;
 
 use std::{
     cmp::Ordering,
-    collections::{BTreeMap, HashMap, HashSet},
+    collections::{BTreeMap, HashSet},
     sync::Arc,
 };
@@ -23,12 +23,11 @@ use ruma::{
     serde::Base64,
     state_res::{self, Event, RoomVersion},
     uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId,
-    OwnedEventId, OwnedRoomId, OwnedServerName, RoomId, RoomVersionId,
-    ServerName, UserId,
+    OwnedEventId, OwnedServerName, RoomId, RoomVersionId, ServerName, UserId,
 };
 use serde::Deserialize;
 use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
-use tokio::sync::{Mutex, MutexGuard, RwLock};
+use tokio::sync::{MutexGuard, RwLock};
 use tracing::{error, info, warn};
 
 use super::state_compressor::CompressedStateEvent;
@@ -92,8 +91,6 @@ impl Ord for PduCount {
 
 pub(crate) struct Service {
     pub(crate) db: &'static dyn Data,
-
-    pub(crate) lasttimelinecount_cache: Mutex<HashMap<OwnedRoomId, PduCount>>,
 }
 
 impl Service {
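For context on what was removed: the field was a per-room map behind tokio's async `Mutex` (hence the `.lock().await` calls in the first file), presumably memoizing each room's latest timeline count, judging by the name. Since nothing still read or wrote it, the commit can drop the field, its initialization, its report line, its clear tier, and the now-unused `HashMap`, `Mutex`, and `OwnedRoomId` imports. A minimal sketch of such a cache, with `String` standing in for ruma's `OwnedRoomId` and `u64` for `PduCount` so it only needs tokio:

use std::collections::HashMap;

use tokio::sync::Mutex;

struct Timeline {
    // Async Mutex so the guard can be held across .await points.
    lasttimelinecount_cache: Mutex<HashMap<String, u64>>,
}

impl Timeline {
    // Get-or-compute: return the cached count, or load and remember it.
    async fn last_timeline_count(&self, room_id: &str) -> u64 {
        let mut cache = self.lasttimelinecount_cache.lock().await;
        if let Some(&count) = cache.get(room_id) {
            return count;
        }
        let count = self.load_count_from_db(room_id).await;
        cache.insert(room_id.to_owned(), count);
        count
    }

    async fn load_count_from_db(&self, _room_id: &str) -> u64 {
        0 // placeholder for the real database read
    }
}

#[tokio::main]
async fn main() {
    let timeline = Timeline { lasttimelinecount_cache: Mutex::new(HashMap::new()) };
    assert_eq!(timeline.last_timeline_count("!room:example.org").await, 0);
}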