From a061afa431cd21e85efda2ca9087c390d4d62874 Mon Sep 17 00:00:00 2001
From: Charles Hall <charles@computer.surgery>
Date: Thu, 26 Sep 2024 17:14:15 -0700
Subject: [PATCH] move `open` off of `KeyValueDatabaseEngine`

You have to know the type to create the trait object anyway.

Also, each backend has different configuration options, which means
either passing all options to all backends despite them not needing it,
or doing this. So I did this.
---
 src/database.rs                     |  4 +-
 src/database/abstraction.rs         |  4 --
 src/database/abstraction/rocksdb.rs | 10 ++--
 src/database/abstraction/sqlite.rs  | 76 ++++++++++++++---------
 4 files changed, 45 insertions(+), 49 deletions(-)

diff --git a/src/database.rs b/src/database.rs
index 62346659..6a21e325 100644
--- a/src/database.rs
+++ b/src/database.rs
@@ -311,11 +311,11 @@ impl KeyValueDatabase {
         let x: Arc<dyn KeyValueDatabaseEngine> = match config.database.backend {
             #[cfg(feature = "sqlite")]
             DatabaseBackend::Sqlite => {
-                Arc::new(Arc::<abstraction::sqlite::Engine>::open(config)?)
+                Arc::new(Arc::new(abstraction::sqlite::Engine::open(config)?))
             }
             #[cfg(feature = "rocksdb")]
             DatabaseBackend::Rocksdb => {
-                Arc::new(Arc::<abstraction::rocksdb::Engine>::open(config)?)
+                Arc::new(Arc::new(abstraction::rocksdb::Engine::open(config)?))
             }
         };
 
diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs
index 63714cd3..c30e3829 100644
--- a/src/database/abstraction.rs
+++ b/src/database/abstraction.rs
@@ -12,10 +12,6 @@ pub(crate) mod rocksdb;
 pub(crate) mod watchers;
 
 pub(crate) trait KeyValueDatabaseEngine: Send + Sync {
-    #[cfg(any(feature = "sqlite", feature = "rocksdb"))]
-    fn open(config: &super::Config) -> Result<Self>
-    where
-        Self: Sized;
     fn open_tree(&self, name: &'static str) -> Result<Arc<dyn KvTree>>;
     fn cleanup(&self) -> Result<()> {
         Ok(())
diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs
index 90cdb06a..43e84603 100644
--- a/src/database/abstraction/rocksdb.rs
+++ b/src/database/abstraction/rocksdb.rs
@@ -69,8 +69,8 @@ fn db_options(max_open_files: i32, rocksdb_cache: &Cache) -> Options {
     db_opts
 }
 
-impl KeyValueDatabaseEngine for Arc<Engine> {
-    fn open(config: &Config) -> Result<Self> {
+impl Engine {
+    pub(crate) fn open(config: &Config) -> Result<Self> {
         #[allow(
             clippy::as_conversions,
             clippy::cast_sign_loss,
@@ -104,15 +104,17 @@ impl KeyValueDatabaseEngine for Arc<Engine> {
             }),
         )?;
 
-        Ok(Arc::new(Engine {
+        Ok(Engine {
             rocks: db,
             max_open_files: config.database.rocksdb_max_open_files,
             cache: rocksdb_cache,
             old_cfs: cfs,
             new_cfs: Mutex::default(),
-        }))
+        })
     }
+}
 
+impl KeyValueDatabaseEngine for Arc<Engine> {
     fn open_tree(&self, name: &'static str) -> Result<Arc<dyn KvTree>> {
         let mut new_cfs =
             self.new_cfs.lock().expect("lock should not be poisoned");
diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs
index 7b41fc79..1ed4f248 100644
--- a/src/database/abstraction/sqlite.rs
+++ b/src/database/abstraction/sqlite.rs
@@ -66,6 +66,43 @@ pub(crate) struct Engine {
 }
 
 impl Engine {
+    pub(crate) fn open(config: &Config) -> Result<Self> {
+        let path = Path::new(&config.database.path).join(format!(
+            "{}.db",
+            if config.conduit_compat {
+                "conduit"
+            } else {
+                "grapevine"
+            }
+        ));
+
+        // calculates cache-size per permanent connection
+        // 1. convert MB to KiB
+        // 2. divide by permanent connections + permanent iter connections +
+        //    write connection
+        // 3. round down to nearest integer
+        #[allow(
+            clippy::as_conversions,
+            clippy::cast_possible_truncation,
+            clippy::cast_precision_loss,
+            clippy::cast_sign_loss
+        )]
+        let cache_size_per_thread =
+            ((config.database.cache_capacity_mb * 1024.0)
+                / ((num_cpus::get() as f64 * 2.0) + 1.0)) as u32;
+
+        let writer =
+            Mutex::new(Engine::prepare_conn(&path, cache_size_per_thread)?);
+
+        Ok(Engine {
+            writer,
+            read_conn_tls: ThreadLocal::new(),
+            read_iterator_conn_tls: ThreadLocal::new(),
+            path,
+            cache_size_per_thread,
+        })
+    }
+
     fn prepare_conn(path: &Path, cache_size_kb: u32) -> Result<Connection> {
         let conn = Connection::open(path)?;
 
@@ -109,45 +146,6 @@ impl Engine {
 }
 
 impl KeyValueDatabaseEngine for Arc<Engine> {
-    fn open(config: &Config) -> Result<Self> {
-        let path = Path::new(&config.database.path).join(format!(
-            "{}.db",
-            if config.conduit_compat {
-                "conduit"
-            } else {
-                "grapevine"
-            }
-        ));
-
-        // calculates cache-size per permanent connection
-        // 1. convert MB to KiB
-        // 2. divide by permanent connections + permanent iter connections +
-        //    write connection
-        // 3. round down to nearest integer
-        #[allow(
-            clippy::as_conversions,
-            clippy::cast_possible_truncation,
-            clippy::cast_precision_loss,
-            clippy::cast_sign_loss
-        )]
-        let cache_size_per_thread =
-            ((config.database.cache_capacity_mb * 1024.0)
-                / ((num_cpus::get() as f64 * 2.0) + 1.0)) as u32;
-
-        let writer =
-            Mutex::new(Engine::prepare_conn(&path, cache_size_per_thread)?);
-
-        let arc = Arc::new(Engine {
-            writer,
-            read_conn_tls: ThreadLocal::new(),
-            read_iterator_conn_tls: ThreadLocal::new(),
-            path,
-            cache_size_per_thread,
-        });
-
-        Ok(arc)
-    }
-
     fn open_tree(&self, name: &str) -> Result<Arc<dyn KvTree>> {
         self.write_lock().execute(
             &format!(