avoid overhead when cache sizes are zero

Don't even try taking locks, inserting or removing anything, etc.
Charles Hall 2024-10-08 22:28:52 -07:00
parent 1148c6004f
commit d42a5ec1f0
5 changed files with 184 additions and 174 deletions
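The shape of the change, in isolation: the cache field becomes an Option, a size of zero constructs None, and both the lookup and the insert paths check that the cache exists before touching the mutex. Below is a minimal, self-contained sketch of that pattern, assuming the lru-cache crate's LruCache (whose constructor takes a plain usize capacity and which provides get_mut and insert, matching the calls in the diff); the Service shape, load function, and key/value type aliases are simplified stand-ins, not the real types from this repository.

use std::sync::Mutex;

use lru_cache::LruCache;

// Simplified stand-ins for the real key/value types.
type ShortStateHash = u64;
type CompressedStateLayer = Vec<u8>;

struct Service {
    // `None` when the configured size is zero, so no mutex is ever locked
    // and nothing is ever inserted or evicted.
    stateinfo_cache:
        Option<Mutex<LruCache<ShortStateHash, Vec<CompressedStateLayer>>>>,
}

impl Service {
    fn new(stateinfo_cache_size: usize) -> Self {
        Self {
            // Only allocate the cache (and its mutex) for a nonzero size.
            stateinfo_cache: (stateinfo_cache_size > 0)
                .then(|| Mutex::new(LruCache::new(stateinfo_cache_size))),
        }
    }

    fn load(&self, shortstatehash: ShortStateHash) -> Vec<CompressedStateLayer> {
        // Fast path: consult the cache only if it exists.
        if let Some(cache) = &self.stateinfo_cache {
            if let Some(hit) = cache.lock().unwrap().get_mut(&shortstatehash) {
                return hit.clone();
            }
        }

        // Slow path: stand-in for the real database lookup.
        let response = vec![vec![0u8]];

        // Populate the cache only if it exists.
        if let Some(cache) = &self.stateinfo_cache {
            cache.lock().unwrap().insert(shortstatehash, response.clone());
        }

        response
    }
}

With a size of zero, new stores None, so load never takes a lock and goes straight to the slow path; with a nonzero size the behavior is unchanged from before.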

@@ -32,7 +32,7 @@ pub(crate) struct Service {
     #[allow(clippy::type_complexity)]
     pub(crate) stateinfo_cache:
-        Mutex<LruCache<ShortStateHash, Vec<CompressedStateLayer>>>,
+        Option<Mutex<LruCache<ShortStateHash, Vec<CompressedStateLayer>>>>,
 }
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
@@ -78,7 +78,8 @@ impl Service {
     ) -> Self {
         Self {
             db,
-            stateinfo_cache: Mutex::new(LruCache::new(stateinfo_cache_size)),
+            stateinfo_cache: (stateinfo_cache_size > 0)
+                .then(|| Mutex::new(LruCache::new(stateinfo_cache_size))),
         }
     }
@@ -92,11 +93,11 @@ impl Service {
     ) -> Result<Vec<CompressedStateLayer>> {
         let lookup = Lookup::StateInfo;
-        if let Some(r) =
-            self.stateinfo_cache.lock().unwrap().get_mut(&shortstatehash)
-        {
-            METRICS.record_lookup(lookup, FoundIn::Cache);
-            return Ok(r.clone());
+        if let Some(cache) = &self.stateinfo_cache {
+            if let Some(r) = cache.lock().unwrap().get_mut(&shortstatehash) {
+                METRICS.record_lookup(lookup, FoundIn::Cache);
+                return Ok(r.clone());
+            }
         }
         let StateDiff {
@@ -131,10 +132,10 @@ impl Service {
         };
         METRICS.record_lookup(lookup, FoundIn::Database);
-        self.stateinfo_cache
-            .lock()
-            .unwrap()
-            .insert(shortstatehash, response.clone());
+        if let Some(cache) = &self.stateinfo_cache {
+            cache.lock().unwrap().insert(shortstatehash, response.clone());
+        }
         Ok(response)
     }