make database config a sum type

This way we don't need to construct the entire configuration to load a
database or database engine.

The other advantage is that it allows for options that are unique to
each database backend.

The one thing I don't like about this is `DatabaseConfig::path`, whose
existence implies all databases will have a file path, which is not
true for out-of-process databases. The only thing this is really used
for is creating the media directory. I think we should restructure the
configuration in the future to resolve this.
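
To illustrate, here's roughly what the new scheme looks like end to
end. This is a trimmed sketch, not code from this commit: only the
`path` field is kept, the example path is made up, and it assumes the
serde and toml crates.

    use std::path::PathBuf;

    use serde::Deserialize;

    // The `backend` key picks the variant; the remaining keys fill in
    // that variant's struct.
    #[derive(Debug, Deserialize)]
    #[serde(rename_all = "lowercase", tag = "backend")]
    enum DatabaseConfig {
        Rocksdb(RocksdbConfig),
        Sqlite(SqliteConfig),
    }

    #[derive(Debug, Deserialize)]
    struct RocksdbConfig {
        path: PathBuf,
    }

    #[derive(Debug, Deserialize)]
    struct SqliteConfig {
        path: PathBuf,
    }

    fn main() {
        let config: DatabaseConfig = toml::from_str(
            "backend = \"sqlite\"\npath = \"/var/lib/example/db\"\n",
        )
        .expect("valid database config");
        assert!(matches!(config, DatabaseConfig::Sqlite(_)));
    }
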
Charles Hall · 2024-10-08 19:20:58 -07:00
parent f5ba3e3062 · commit 153e3e4c93
6 changed files with 88 additions and 83 deletions

@@ -76,7 +76,7 @@ pub(crate) async fn run(args: ServeArgs) -> Result<(), error::ServeCommand> {
     info!("Loading database");
 
     let db = Box::leak(Box::new(
-        KeyValueDatabase::load_or_create(&config)
+        KeyValueDatabase::load_or_create(&config.database)
            .map_err(Error::DatabaseError)?,
    ));

@@ -216,35 +216,53 @@ impl Default for TurnConfig {
     }
 }
 
-#[derive(Clone, Copy, Debug, Deserialize)]
-#[serde(rename_all = "lowercase")]
-pub(crate) enum DatabaseBackend {
-    #[cfg(feature = "rocksdb")]
-    Rocksdb,
-    #[cfg(feature = "sqlite")]
-    Sqlite,
+#[cfg(feature = "rocksdb")]
+#[derive(Clone, Debug, Deserialize)]
+pub(crate) struct RocksdbConfig {
+    pub(crate) path: PathBuf,
+    #[serde(default = "default_rocksdb_max_open_files")]
+    pub(crate) max_open_files: i32,
+    #[serde(default = "default_rocksdb_cache_capacity_bytes")]
+    pub(crate) cache_capacity_bytes: usize,
 }
 
-impl Display for DatabaseBackend {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        match *self {
+#[cfg(feature = "sqlite")]
+#[derive(Clone, Debug, Deserialize)]
+pub(crate) struct SqliteConfig {
+    pub(crate) path: PathBuf,
+    #[serde(default = "default_sqlite_cache_capacity_kilobytes")]
+    pub(crate) cache_capacity_kilobytes: u32,
+}
+
+#[derive(Clone, Debug, Deserialize)]
+#[serde(rename_all = "lowercase", tag = "backend")]
+pub(crate) enum DatabaseConfig {
+    #[cfg(feature = "rocksdb")]
+    Rocksdb(RocksdbConfig),
+    #[cfg(feature = "sqlite")]
+    Sqlite(SqliteConfig),
+}
+
+impl DatabaseConfig {
+    pub(crate) fn path(&self) -> &Path {
+        match self {
             #[cfg(feature = "rocksdb")]
-            DatabaseBackend::Rocksdb => write!(f, "RocksDB"),
+            DatabaseConfig::Rocksdb(x) => &x.path,
             #[cfg(feature = "sqlite")]
-            DatabaseBackend::Sqlite => write!(f, "SQLite"),
+            DatabaseConfig::Sqlite(x) => &x.path,
         }
     }
 }
 
-#[derive(Clone, Debug, Deserialize)]
-pub(crate) struct DatabaseConfig {
-    pub(crate) backend: DatabaseBackend,
-    pub(crate) path: String,
-    #[serde(default = "default_db_cache_capacity_mb")]
-    pub(crate) cache_capacity_mb: f64,
-    #[cfg(feature = "rocksdb")]
-    #[serde(default = "default_rocksdb_max_open_files")]
-    pub(crate) rocksdb_max_open_files: i32,
+impl Display for DatabaseConfig {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {
+            #[cfg(feature = "rocksdb")]
+            DatabaseConfig::Rocksdb(_) => write!(f, "RocksDB"),
+            #[cfg(feature = "sqlite")]
+            DatabaseConfig::Sqlite(_) => write!(f, "SQLite"),
+        }
+    }
 }
 
 #[derive(Clone, Debug, Default, Deserialize)]
@@ -377,10 +395,6 @@ fn default_port() -> u16 {
     6167
 }
 
-fn default_db_cache_capacity_mb() -> f64 {
-    300.0
-}
-
 fn default_cache_capacity_modifier() -> f64 {
     1.0
 }
@@ -390,6 +404,16 @@ fn default_rocksdb_max_open_files() -> i32 {
     1000
 }
 
+#[cfg(feature = "rocksdb")]
+fn default_rocksdb_cache_capacity_bytes() -> usize {
+    300 * 1024 * 1024
+}
+
+#[cfg(feature = "sqlite")]
+fn default_sqlite_cache_capacity_kilobytes() -> u32 {
+    300 * 1024
+}
+
 fn default_pdu_cache_capacity() -> usize {
     150_000
 }
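
Because each backend struct now carries its own `#[serde(default = ...)]`,
a section that only sets `path` picks up that backend's defaults. A
hypothetical check, continuing the trimmed sketch from the commit
message but assuming `RocksdbConfig` with the defaulted fields and the
two default functions from this hunk:

    // A RocksDB section that only sets `path` comes back with
    // max_open_files = 1000 and cache_capacity_bytes = 300 MiB.
    let config: DatabaseConfig = toml::from_str(
        "backend = \"rocksdb\"\npath = \"/var/lib/example/db\"\n",
    )
    .expect("valid database config");
    if let DatabaseConfig::Rocksdb(rocksdb) = &config {
        assert_eq!(rocksdb.max_open_files, 1000);
        assert_eq!(rocksdb.cache_capacity_bytes, 300 * 1024 * 1024);
    }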

@@ -14,7 +14,7 @@ use ruma::{
 use tracing::{debug, error, info, info_span, warn, Instrument};
 
 use crate::{
-    config::DatabaseBackend,
+    config::DatabaseConfig,
     service::{
         media::MediaFileKey,
         rooms::{
@@ -22,7 +22,7 @@ use crate::{
             state_compressor::CompressedStateEvent,
         },
     },
-    services, utils, Config, Error, Result,
+    services, utils, Error, Result,
 };
 
 pub(crate) mod abstraction;
@@ -233,20 +233,20 @@ pub(crate) struct KeyValueDatabase {
 impl KeyValueDatabase {
     pub(crate) fn load_or_create_engine(
-        config: &Config,
+        config: &DatabaseConfig,
     ) -> Result<Arc<dyn KeyValueDatabaseEngine>> {
         #[cfg(not(any(feature = "rocksdb", feature = "sqlite")))]
         return Err(Error::BadConfig(
             "Compiled without support for any databases",
         ));
 
-        let x: Arc<dyn KeyValueDatabaseEngine> = match config.database.backend {
+        let x: Arc<dyn KeyValueDatabaseEngine> = match config {
             #[cfg(feature = "sqlite")]
-            DatabaseBackend::Sqlite => {
+            DatabaseConfig::Sqlite(config) => {
                 Arc::new(Arc::new(abstraction::sqlite::Engine::open(config)?))
             }
             #[cfg(feature = "rocksdb")]
-            DatabaseBackend::Rocksdb => {
+            DatabaseConfig::Rocksdb(config) => {
                 Arc::new(Arc::new(abstraction::rocksdb::Engine::open(config)?))
             }
         };
@@ -260,7 +260,9 @@
         allow(unreachable_code)
     )]
     #[allow(clippy::too_many_lines)]
-    pub(crate) fn load_or_create(config: &Config) -> Result<KeyValueDatabase> {
+    pub(crate) fn load_or_create(
+        config: &DatabaseConfig,
+    ) -> Result<KeyValueDatabase> {
         let builder = Self::load_or_create_engine(config)?;
 
         let db = Self {
@@ -943,7 +945,7 @@
         );
 
         info!(
-            backend = %services().globals.config.database.backend,
+            backend = %services().globals.config.database,
            version = latest_database_version,
            "Loaded database",
        );
@@ -956,7 +958,7 @@
             services().admin.create_admin_room().await?;
 
             info!(
-                backend = %services().globals.config.database.backend,
+                backend = %services().globals.config.database,
                version = latest_database_version,
                "Created new database",
            );
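
This narrowed signature is the payoff of the commit message's first
point: loading a database now needs only the database section, not a
whole `Config`. A hedged sketch of a caller (the values and surrounding
code are made up; `DatabaseConfig`, `SqliteConfig`, and
`KeyValueDatabase::load_or_create` are the items from this diff):

    let db_config = DatabaseConfig::Sqlite(SqliteConfig {
        path: PathBuf::from("/tmp/scratch-db"),
        cache_capacity_kilobytes: 300 * 1024,
    });
    // No fully-populated `Config` required anymore.
    let db = KeyValueDatabase::load_or_create(&db_config)?;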

@@ -12,10 +12,8 @@ use rocksdb::{
 };
 use tracing::Level;
 
-use super::{
-    super::Config, watchers::Watchers, KeyValueDatabaseEngine, KvTree,
-};
-use crate::{utils, Result};
+use super::{watchers::Watchers, KeyValueDatabaseEngine, KvTree};
+use crate::{config::RocksdbConfig, utils, Result};
 
 pub(crate) struct Engine {
     rocks: DBWithThreadMode<MultiThreaded>,
@@ -70,43 +68,35 @@ fn db_options(max_open_files: i32, rocksdb_cache: &Cache) -> Options {
 }
 
 impl Engine {
-    pub(crate) fn open(config: &Config) -> Result<Self> {
-        #[allow(
-            clippy::as_conversions,
-            clippy::cast_sign_loss,
-            clippy::cast_possible_truncation
-        )]
-        let cache_capacity_bytes =
-            (config.database.cache_capacity_mb * 1024.0 * 1024.0) as usize;
-        let rocksdb_cache = Cache::new_lru_cache(cache_capacity_bytes);
+    pub(crate) fn open(config: &RocksdbConfig) -> Result<Self> {
+        let rocksdb_cache = Cache::new_lru_cache(config.cache_capacity_bytes);
 
-        let db_opts =
-            db_options(config.database.rocksdb_max_open_files, &rocksdb_cache);
+        let db_opts = db_options(config.max_open_files, &rocksdb_cache);
 
-        let cfs = DBWithThreadMode::<MultiThreaded>::list_cf(
-            &db_opts,
-            &config.database.path,
-        )
-        .map(|x| x.into_iter().collect::<HashSet<_>>())
-        .unwrap_or_default();
+        let cfs =
+            DBWithThreadMode::<MultiThreaded>::list_cf(&db_opts, &config.path)
+                .map(|x| x.into_iter().collect::<HashSet<_>>())
+                .unwrap_or_default();
 
         let db = DBWithThreadMode::<MultiThreaded>::open_cf_descriptors(
             &db_opts,
-            &config.database.path,
+            &config.path,
             cfs.iter().map(|name| {
                 ColumnFamilyDescriptor::new(
                     name,
-                    db_options(
-                        config.database.rocksdb_max_open_files,
-                        &rocksdb_cache,
-                    ),
+                    db_options(config.max_open_files, &rocksdb_cache),
                 )
             }),
         )?;
 
         Ok(Engine {
             rocks: db,
-            max_open_files: config.database.rocksdb_max_open_files,
+            max_open_files: config.max_open_files,
            cache: rocksdb_cache,
            old_cfs: cfs,
            new_cfs: Mutex::default(),

@@ -13,7 +13,7 @@ use thread_local::ThreadLocal;
 use tracing::debug;
 
 use super::{watchers::Watchers, KeyValueDatabaseEngine, KvTree};
-use crate::{database::Config, Result};
+use crate::{config::SqliteConfig, Result};
 
 thread_local! {
     static READ_CONNECTION: RefCell<Option<&'static Connection>> =
@@ -63,39 +63,26 @@ pub(crate) struct Engine {
     read_iterator_conn_tls: ThreadLocal<Connection>,
     path: PathBuf,
-    cache_size_per_thread: u32,
+    cache_capacity_kilobytes: u32,
 }
 
 impl Engine {
-    pub(crate) fn open(config: &Config) -> Result<Self> {
-        fs::create_dir_all(&config.database.path)?;
+    pub(crate) fn open(config: &SqliteConfig) -> Result<Self> {
+        fs::create_dir_all(&config.path)?;
 
-        let path = Path::new(&config.database.path).join("sqlite.db");
+        let path = config.path.join("sqlite.db");
 
-        // calculates cache-size per permanent connection
-        // 1. convert MB to KiB
-        // 2. divide by permanent connections + permanent iter connections +
-        //    write connection
-        // 3. round down to nearest integer
-        #[allow(
-            clippy::as_conversions,
-            clippy::cast_possible_truncation,
-            clippy::cast_precision_loss,
-            clippy::cast_sign_loss
-        )]
-        let cache_size_per_thread =
-            ((config.database.cache_capacity_mb * 1024.0)
-                / ((num_cpus::get() as f64 * 2.0) + 1.0)) as u32;
-
-        let writer =
-            Mutex::new(Engine::prepare_conn(&path, cache_size_per_thread)?);
+        let writer = Mutex::new(Engine::prepare_conn(
+            &path,
+            config.cache_capacity_kilobytes,
+        )?);
 
         Ok(Engine {
             writer,
             read_conn_tls: ThreadLocal::new(),
             read_iterator_conn_tls: ThreadLocal::new(),
             path,
-            cache_size_per_thread,
+            cache_capacity_kilobytes: config.cache_capacity_kilobytes,
         })
     }
@@ -121,13 +108,15 @@
     fn read_lock(&self) -> &Connection {
         self.read_conn_tls.get_or(|| {
-            Self::prepare_conn(&self.path, self.cache_size_per_thread).unwrap()
+            Self::prepare_conn(&self.path, self.cache_capacity_kilobytes)
+                .unwrap()
         })
     }
 
     fn read_lock_iterator(&self) -> &Connection {
         self.read_iterator_conn_tls.get_or(|| {
-            Self::prepare_conn(&self.path, self.cache_size_per_thread).unwrap()
+            Self::prepare_conn(&self.path, self.cache_capacity_kilobytes)
+                .unwrap()
         })
    }

@@ -609,7 +609,7 @@ impl Service {
     pub(crate) fn get_media_folder(&self) -> PathBuf {
         let mut r = PathBuf::new();
-        r.push(self.config.database.path.clone());
+        r.push(self.config.database.path());
         r.push("media");
         r
     }