fix(keys): only use keys valid at the time of PDU or transaction, and actually refresh keys

Previously, we only fetched keys once, requesting them again only if any were missing, which allowed ancient keys to be used to sign PDUs and transactions
Now we refresh keys that either have or are about to expire, preventing attacks that make use of leaked private keys of a homeserver
We also ensure that when validating PDUs or transactions, they are valid at the origin_server_ts or at the time we received the transaction, respectively
So as not to break event authorization for old rooms, we need to keep old keys around
We move verify_keys which we no longer see in direct requests to the origin to old_verify_keys
We keep old_verify_keys indefinitely as mentioned above, so as not to break event authorization (at least until a future MSC addresses this)

Original patch by Matthias. Benjamin just rebased it onto grapevine and
fixed clippy/rustc warnings.

Co-authored-by: Benjamin Lee <benjamin@computer.surgery>
This commit is contained in:
Matthias Ahouansou 2024-06-09 11:15:49 +01:00 committed by Charles Hall
parent da99b0706e
commit 9087da91db
No known key found for this signature in database
GPG key ID: 7B8E0645816E07CF
8 changed files with 610 additions and 214 deletions

View file

@ -25,9 +25,9 @@ use ruma::{
}, },
StateEventType, TimelineEventType, StateEventType, TimelineEventType,
}, },
serde::Base64, state_res, CanonicalJsonObject, CanonicalJsonValue, EventId,
state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, OwnedServerName,
OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, RoomVersionId, UserId, OwnedUserId, RoomId, RoomVersionId, UserId,
}; };
use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
use tokio::sync::RwLock; use tokio::sync::RwLock;
@ -35,7 +35,10 @@ use tracing::{debug, error, info, warn};
use super::get_alias_helper; use super::get_alias_helper;
use crate::{ use crate::{
service::pdu::{gen_event_id_canonical_json, PduBuilder}, service::{
globals::SigningKeys,
pdu::{gen_event_id_canonical_json, PduBuilder},
},
services, utils, Ar, Error, PduEvent, Ra, Result, services, utils, Ar, Error, PduEvent, Ra, Result,
}; };
@ -1192,7 +1195,7 @@ async fn make_join_request(
async fn validate_and_add_event_id( async fn validate_and_add_event_id(
pdu: &RawJsonValue, pdu: &RawJsonValue,
room_version: &RoomVersionId, room_version: &RoomVersionId,
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>, pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>,
) -> Result<(OwnedEventId, CanonicalJsonObject)> { ) -> Result<(OwnedEventId, CanonicalJsonObject)> {
let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()) let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get())
.map_err(|e| { .map_err(|e| {
@ -1235,11 +1238,40 @@ async fn validate_and_add_event_id(
} }
} }
if let Err(e) = ruma::signatures::verify_event( let origin_server_ts = value.get("origin_server_ts").ok_or_else(|| {
&*pub_key_map.read().await, error!("Invalid PDU, no origin_server_ts field");
&value, Error::BadRequest(
ErrorKind::MissingParam,
"Invalid PDU, no origin_server_ts field",
)
})?;
let origin_server_ts: MilliSecondsSinceUnixEpoch = {
let ts = origin_server_ts.as_integer().ok_or_else(|| {
Error::BadRequest(
ErrorKind::InvalidParam,
"origin_server_ts must be an integer",
)
})?;
MilliSecondsSinceUnixEpoch(i64::from(ts).try_into().map_err(|_| {
Error::BadRequest(
ErrorKind::InvalidParam,
"Time must be after the unix epoch",
)
})?)
};
let unfiltered_keys = (*pub_key_map.read().await).clone();
let keys = services().globals.filter_keys_server_map(
unfiltered_keys,
origin_server_ts,
room_version, room_version,
) { );
if let Err(e) = ruma::signatures::verify_event(&keys, &value, room_version)
{
warn!("Event {} failed verification {:?} {}", event_id, pdu, e); warn!("Event {} failed verification {:?} {}", event_id, pdu, e);
back_off(event_id).await; back_off(event_id).await;
return Err(Error::BadServerResponse("Event failed verification.")); return Err(Error::BadServerResponse("Event failed verification."));

View file

@ -21,7 +21,8 @@ use ruma::{
OutgoingResponse, OutgoingResponse,
}, },
server_util::authorization::XMatrix, server_util::authorization::XMatrix,
CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId, CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedDeviceId,
OwnedServerName, OwnedUserId, UserId,
}; };
use serde::Deserialize; use serde::Deserialize;
use tracing::{debug, error, warn}; use tracing::{debug, error, warn};
@ -260,6 +261,7 @@ async fn ar_from_request_inner(
.fetch_signing_keys( .fetch_signing_keys(
&x_matrix.origin, &x_matrix.origin,
vec![x_matrix.key.to_string()], vec![x_matrix.key.to_string()],
false,
) )
.await; .await;
@ -274,9 +276,18 @@ async fn ar_from_request_inner(
} }
}; };
// Only verify_keys that are currently valid should be used for
// validating requests as per MSC4029
let pub_key_map = BTreeMap::from_iter([( let pub_key_map = BTreeMap::from_iter([(
x_matrix.origin.as_str().to_owned(), x_matrix.origin.as_str().to_owned(),
keys, if keys.valid_until_ts > MilliSecondsSinceUnixEpoch::now() {
keys.verify_keys
.into_iter()
.map(|(id, key)| (id, key.key))
.collect()
} else {
BTreeMap::new()
},
)]); )]);
match ruma::signatures::verify_json(&pub_key_map, &request_map) match ruma::signatures::verify_json(&pub_key_map, &request_map)

View file

@ -1,17 +1,18 @@
use std::collections::{BTreeMap, HashMap}; use std::collections::HashMap;
use async_trait::async_trait; use async_trait::async_trait;
use futures_util::{stream::FuturesUnordered, StreamExt}; use futures_util::{stream::FuturesUnordered, StreamExt};
use lru_cache::LruCache; use lru_cache::LruCache;
use ruma::{ use ruma::{
api::federation::discovery::{ServerSigningKeys, VerifyKey}, api::federation::discovery::{OldVerifyKey, ServerSigningKeys},
signatures::Ed25519KeyPair, signatures::Ed25519KeyPair,
DeviceId, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, ServerName, DeviceId, ServerName, UserId,
UserId,
}; };
use crate::{ use crate::{
database::KeyValueDatabase, service, services, utils, Error, Result, database::KeyValueDatabase,
service::{self, globals::SigningKeys},
services, utils, Error, Result,
}; };
pub(crate) const COUNTER: &[u8] = b"c"; pub(crate) const COUNTER: &[u8] = b"c";
@ -240,47 +241,97 @@ lasttimelinecount_cache: {lasttimelinecount_cache}\n"
self.global.remove(b"keypair") self.global.remove(b"keypair")
} }
fn add_signing_key( fn add_signing_key_from_trusted_server(
&self, &self,
origin: &ServerName, origin: &ServerName,
new_keys: ServerSigningKeys, new_keys: ServerSigningKeys,
) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> { ) -> Result<SigningKeys> {
// Not atomic, but this is not critical let prev_keys = self.server_signingkeys.get(origin.as_bytes())?;
let signingkeys = self.server_signingkeys.get(origin.as_bytes())?;
let mut keys = signingkeys Ok(
.and_then(|keys| serde_json::from_slice(&keys).ok()) if let Some(mut prev_keys) = prev_keys.and_then(|keys| {
.unwrap_or_else(|| { serde_json::from_slice::<ServerSigningKeys>(&keys).ok()
// Just insert "now", it doesn't matter }) {
ServerSigningKeys::new( let ServerSigningKeys {
origin.to_owned(), verify_keys,
MilliSecondsSinceUnixEpoch::now(), old_verify_keys,
) ..
}); } = new_keys;
let ServerSigningKeys { prev_keys.verify_keys.extend(verify_keys);
verify_keys, prev_keys.old_verify_keys.extend(old_verify_keys);
old_verify_keys, prev_keys.valid_until_ts = new_keys.valid_until_ts;
..
} = new_keys;
keys.verify_keys.extend(verify_keys); self.server_signingkeys.insert(
keys.old_verify_keys.extend(old_verify_keys); origin.as_bytes(),
&serde_json::to_vec(&prev_keys)
.expect("serversigningkeys can be serialized"),
)?;
self.server_signingkeys.insert( prev_keys.into()
origin.as_bytes(), } else {
&serde_json::to_vec(&keys) self.server_signingkeys.insert(
.expect("serversigningkeys can be serialized"), origin.as_bytes(),
)?; &serde_json::to_vec(&new_keys)
.expect("serversigningkeys can be serialized"),
)?;
let mut tree = keys.verify_keys; new_keys.into()
tree.extend( },
keys.old_verify_keys )
.into_iter() }
.map(|old| (old.0, VerifyKey::new(old.1.key))),
);
Ok(tree) fn add_signing_key_from_origin(
&self,
origin: &ServerName,
new_keys: ServerSigningKeys,
) -> Result<SigningKeys> {
let prev_keys = self.server_signingkeys.get(origin.as_bytes())?;
Ok(
if let Some(mut prev_keys) = prev_keys.and_then(|keys| {
serde_json::from_slice::<ServerSigningKeys>(&keys).ok()
}) {
let ServerSigningKeys {
verify_keys,
old_verify_keys,
..
} = new_keys;
// Moving `verify_keys` no longer present to `old_verify_keys`
for (key_id, key) in prev_keys.verify_keys {
if !verify_keys.contains_key(&key_id) {
prev_keys.old_verify_keys.insert(
key_id,
OldVerifyKey::new(
prev_keys.valid_until_ts,
key.key,
),
);
}
}
prev_keys.verify_keys = verify_keys;
prev_keys.old_verify_keys.extend(old_verify_keys);
prev_keys.valid_until_ts = new_keys.valid_until_ts;
self.server_signingkeys.insert(
origin.as_bytes(),
&serde_json::to_vec(&prev_keys)
.expect("serversigningkeys can be serialized"),
)?;
prev_keys.into()
} else {
self.server_signingkeys.insert(
origin.as_bytes(),
&serde_json::to_vec(&new_keys)
.expect("serversigningkeys can be serialized"),
)?;
new_keys.into()
},
)
} }
/// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found
@ -288,21 +339,11 @@ lasttimelinecount_cache: {lasttimelinecount_cache}\n"
fn signing_keys_for( fn signing_keys_for(
&self, &self,
origin: &ServerName, origin: &ServerName,
) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> { ) -> Result<Option<SigningKeys>> {
let signingkeys = self let signingkeys =
.server_signingkeys self.server_signingkeys.get(origin.as_bytes())?.and_then(|bytes| {
.get(origin.as_bytes())? serde_json::from_slice::<SigningKeys>(&bytes).ok()
.and_then(|bytes| serde_json::from_slice(&bytes).ok()) });
.map(|keys: ServerSigningKeys| {
let mut tree = keys.verify_keys;
tree.extend(
keys.old_verify_keys
.into_iter()
.map(|old| (old.0, VerifyKey::new(old.1.key))),
);
tree
})
.unwrap_or_default();
Ok(signingkeys) Ok(signingkeys)
} }

View file

@ -28,8 +28,9 @@ use ruma::{
}, },
TimelineEventType, TimelineEventType,
}, },
EventId, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, RoomVersionId, signatures::verify_json,
ServerName, UserId, EventId, MilliSecondsSinceUnixEpoch, OwnedRoomAliasId, OwnedRoomId,
RoomAliasId, RoomId, RoomVersionId, ServerName, UserId,
}; };
use serde_json::value::to_raw_value; use serde_json::value::to_raw_value;
use tokio::sync::{mpsc, Mutex, RwLock}; use tokio::sync::{mpsc, Mutex, RwLock};
@ -1060,25 +1061,55 @@ impl Service {
services() services()
.rooms .rooms
.event_handler .event_handler
// Generally we shouldn't be checking against
// expired keys unless required, so in the admin
// room it might be best to not allow expired
// keys
.fetch_required_signing_keys( .fetch_required_signing_keys(
&value, &value,
&pub_key_map, &pub_key_map
) )
.await?; .await?;
let pub_key_map = pub_key_map.read().await; let mut expired_key_map = BTreeMap::new();
match ruma::signatures::verify_json( let mut valid_key_map = BTreeMap::new();
&pub_key_map,
&value, for (server, keys) in pub_key_map.into_inner() {
) { if keys.valid_until_ts
Ok(()) => RoomMessageEventContent::text_plain( > MilliSecondsSinceUnixEpoch::now()
{
valid_key_map.insert(
server,
keys.verify_keys
.into_iter()
.map(|(id, key)| (id, key.key))
.collect(),
);
} else {
expired_key_map.insert(
server,
keys.verify_keys
.into_iter()
.map(|(id, key)| (id, key.key))
.collect(),
);
}
}
if verify_json(&valid_key_map, &value).is_ok() {
RoomMessageEventContent::text_plain(
"Signature correct", "Signature correct",
), )
Err(e) => RoomMessageEventContent::text_plain( } else if let Err(e) =
format!( verify_json(&expired_key_map, &value)
"Signature verification failed: {e}" {
), RoomMessageEventContent::text_plain(format!(
), "Signature verification failed: {e}"
))
} else {
RoomMessageEventContent::text_plain(
"Signature correct (with expired keys)",
)
} }
} }
Err(e) => RoomMessageEventContent::text_plain(format!( Err(e) => RoomMessageEventContent::text_plain(format!(

View file

@ -15,7 +15,7 @@ use std::{
}; };
use base64::{engine::general_purpose, Engine as _}; use base64::{engine::general_purpose, Engine as _};
pub(crate) use data::Data; pub(crate) use data::{Data, SigningKeys};
use futures_util::FutureExt; use futures_util::FutureExt;
use hyper::service::Service as _; use hyper::service::Service as _;
use hyper_util::{ use hyper_util::{
@ -23,10 +23,9 @@ use hyper_util::{
}; };
use reqwest::dns::{Addrs, Name, Resolve, Resolving}; use reqwest::dns::{Addrs, Name, Resolve, Resolving};
use ruma::{ use ruma::{
api::federation::discovery::{ServerSigningKeys, VerifyKey}, api::federation::discovery::ServerSigningKeys, serde::Base64, DeviceId,
serde::Base64, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, OwnedServerName,
DeviceId, OwnedEventId, OwnedRoomId, OwnedServerName, RoomVersionId, ServerName, UserId,
OwnedServerSigningKeyId, RoomVersionId, ServerName, UserId,
}; };
use tokio::sync::{broadcast, Mutex, RwLock, Semaphore}; use tokio::sync::{broadcast, Mutex, RwLock, Semaphore};
use tracing::{error, info, Instrument}; use tracing::{error, info, Instrument};
@ -379,38 +378,90 @@ impl Service {
room_versions room_versions
} }
/// TODO: the key valid until timestamp is only honored in room version > 4
/// Remove the outdated keys and insert the new ones.
///
/// This doesn't actually check that the keys provided are newer than the /// This doesn't actually check that the keys provided are newer than the
/// old set. /// old set.
pub(crate) fn add_signing_key( pub(crate) fn add_signing_key_from_trusted_server(
&self, &self,
origin: &ServerName, origin: &ServerName,
new_keys: ServerSigningKeys, new_keys: ServerSigningKeys,
) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> { ) -> Result<SigningKeys> {
self.db.add_signing_key(origin, new_keys) self.db.add_signing_key_from_trusted_server(origin, new_keys)
} }
/// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found /// Same as `from_trusted_server`, except it will move active keys not
/// for the server. /// present in `new_keys` to `old_verify_keys`
pub(crate) fn add_signing_key_from_origin(
&self,
origin: &ServerName,
new_keys: ServerSigningKeys,
) -> Result<SigningKeys> {
self.db.add_signing_key_from_origin(origin, new_keys)
}
/// This returns Ok(None) when there are no keys found for the server.
pub(crate) fn signing_keys_for( pub(crate) fn signing_keys_for(
&self, &self,
origin: &ServerName, origin: &ServerName,
) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> { ) -> Result<Option<SigningKeys>> {
let mut keys = self.db.signing_keys_for(origin)?; Ok(self.db.signing_keys_for(origin)?.or_else(|| {
if origin == self.server_name() { (origin == self.server_name()).then(SigningKeys::load_own_keys)
keys.insert( }))
format!("ed25519:{}", services().globals.keypair().version()) }
.try_into()
.expect("found invalid server signing keys in DB"),
VerifyKey {
key: Base64::new(self.keypair.public_key().to_vec()),
},
);
}
Ok(keys) /// Filters the key map of multiple servers down to keys that should be
/// accepted given the expiry time, room version, and timestamp of the
/// parameters
#[allow(clippy::unused_self)]
pub(crate) fn filter_keys_server_map(
&self,
keys: BTreeMap<String, SigningKeys>,
timestamp: MilliSecondsSinceUnixEpoch,
room_version_id: &RoomVersionId,
) -> BTreeMap<String, BTreeMap<String, Base64>> {
keys.into_iter()
.filter_map(|(server, keys)| {
self.filter_keys_single_server(keys, timestamp, room_version_id)
.map(|keys| (server, keys))
})
.collect()
}
/// Filters the keys of a single server down to keys that should be accepted
/// given the expiry time, room version, and timestamp of the parameters
#[allow(clippy::unused_self)]
pub(crate) fn filter_keys_single_server(
&self,
keys: SigningKeys,
timestamp: MilliSecondsSinceUnixEpoch,
room_version_id: &RoomVersionId,
) -> Option<BTreeMap<String, Base64>> {
let all_valid = keys.valid_until_ts > timestamp
// valid_until_ts MUST be ignored in room versions 1, 2, 3, and 4.
// https://spec.matrix.org/v1.10/server-server-api/#get_matrixkeyv2server
|| matches!(room_version_id, RoomVersionId::V1
| RoomVersionId::V2
| RoomVersionId::V4
| RoomVersionId::V3);
all_valid.then(|| {
// Given that either the room version allows stale keys, or the
// valid_until_ts is in the future, all verify_keys are
// valid
let mut map: BTreeMap<_, _> = keys
.verify_keys
.into_iter()
.map(|(id, key)| (id, key.key))
.collect();
map.extend(keys.old_verify_keys.into_iter().filter_map(
|(id, key)| {
// Even on old room versions, we don't allow old keys if
// they are expired
(key.expired_ts > timestamp).then_some((id, key.key))
},
));
map
})
} }
pub(crate) fn database_version(&self) -> Result<u64> { pub(crate) fn database_version(&self) -> Result<u64> {

View file

@ -1,13 +1,75 @@
use std::collections::BTreeMap; use std::{
collections::BTreeMap,
time::{Duration, SystemTime},
};
use async_trait::async_trait; use async_trait::async_trait;
use ruma::{ use ruma::{
api::federation::discovery::{ServerSigningKeys, VerifyKey}, api::federation::discovery::{OldVerifyKey, ServerSigningKeys, VerifyKey},
serde::Base64,
signatures::Ed25519KeyPair, signatures::Ed25519KeyPair,
DeviceId, OwnedServerSigningKeyId, ServerName, UserId, DeviceId, MilliSecondsSinceUnixEpoch, ServerName, UserId,
}; };
use serde::Deserialize;
use crate::Result; use crate::{services, Result};
/// Similar to [`ServerSigningKeys`], but drops a few unnecessary fields we
/// don't require post-validation
#[derive(Deserialize, Debug, Clone)]
pub(crate) struct SigningKeys {
pub(crate) verify_keys: BTreeMap<String, VerifyKey>,
pub(crate) old_verify_keys: BTreeMap<String, OldVerifyKey>,
pub(crate) valid_until_ts: MilliSecondsSinceUnixEpoch,
}
impl SigningKeys {
/// Creates the `SigningKeys` struct, using the keys of the current server
pub(crate) fn load_own_keys() -> Self {
let mut keys = Self {
verify_keys: BTreeMap::new(),
old_verify_keys: BTreeMap::new(),
valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(7 * 86400),
)
.expect("Should be valid until year 500,000,000"),
};
keys.verify_keys.insert(
format!("ed25519:{}", services().globals.keypair().version()),
VerifyKey {
key: Base64::new(
services().globals.keypair.public_key().to_vec(),
),
},
);
keys
}
}
impl From<ServerSigningKeys> for SigningKeys {
fn from(value: ServerSigningKeys) -> Self {
let ServerSigningKeys {
verify_keys,
old_verify_keys,
valid_until_ts,
..
} = value;
Self {
verify_keys: verify_keys
.into_iter()
.map(|(id, key)| (id.to_string(), key))
.collect(),
old_verify_keys: old_verify_keys
.into_iter()
.map(|(id, key)| (id.to_string(), key))
.collect(),
valid_until_ts,
}
}
}
#[async_trait] #[async_trait]
pub(crate) trait Data: Send + Sync { pub(crate) trait Data: Send + Sync {
@ -20,18 +82,29 @@ pub(crate) trait Data: Send + Sync {
fn clear_caches(&self, amount: u32); fn clear_caches(&self, amount: u32);
fn load_keypair(&self) -> Result<Ed25519KeyPair>; fn load_keypair(&self) -> Result<Ed25519KeyPair>;
fn remove_keypair(&self) -> Result<()>; fn remove_keypair(&self) -> Result<()>;
fn add_signing_key( /// Only extends the cached keys, not moving any verify_keys to
/// old_verify_keys, as if we suddenly receive requests from the origin
/// server, we want to be able to accept requests from them
fn add_signing_key_from_trusted_server(
&self, &self,
origin: &ServerName, origin: &ServerName,
new_keys: ServerSigningKeys, new_keys: ServerSigningKeys,
) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>>; ) -> Result<SigningKeys>;
/// Extends cached keys, as well as moving verify_keys that are not present
/// in these new keys to old_verify_keys, so that potentially
/// compromised keys cannot be used to make requests
fn add_signing_key_from_origin(
&self,
origin: &ServerName,
new_keys: ServerSigningKeys,
) -> Result<SigningKeys>;
/// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found
/// for the server. /// for the server.
fn signing_keys_for( fn signing_keys_for(
&self, &self,
origin: &ServerName, origin: &ServerName,
) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>>; ) -> Result<Option<SigningKeys>>;
fn database_version(&self) -> Result<u64>; fn database_version(&self) -> Result<u64>;
fn bump_database_version(&self, new_version: u64) -> Result<()>; fn bump_database_version(&self, new_version: u64) -> Result<()>;
} }

View file

@ -31,7 +31,6 @@ use ruma::{
StateEventType, TimelineEventType, StateEventType, TimelineEventType,
}, },
int, int,
serde::Base64,
state_res::{self, RoomVersion, StateMap}, state_res::{self, RoomVersion, StateMap},
uint, CanonicalJsonObject, CanonicalJsonValue, EventId, uint, CanonicalJsonObject, CanonicalJsonValue, EventId,
MilliSecondsSinceUnixEpoch, OwnedServerName, OwnedServerSigningKeyId, MilliSecondsSinceUnixEpoch, OwnedServerName, OwnedServerSigningKeyId,
@ -43,8 +42,10 @@ use tracing::{debug, error, info, trace, warn};
use super::state_compressor::CompressedStateEvent; use super::state_compressor::CompressedStateEvent;
use crate::{ use crate::{
service::pdu, services, utils::debug_slice_truncated, Error, PduEvent, service::{globals::SigningKeys, pdu},
Result, services,
utils::debug_slice_truncated,
Error, PduEvent, Result,
}; };
pub(crate) struct Service; pub(crate) struct Service;
@ -87,7 +88,7 @@ impl Service {
room_id: &'a RoomId, room_id: &'a RoomId,
value: BTreeMap<String, CanonicalJsonValue>, value: BTreeMap<String, CanonicalJsonValue>,
is_timeline_event: bool, is_timeline_event: bool,
pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>, pub_key_map: &'a RwLock<BTreeMap<String, SigningKeys>>,
) -> Result<Option<Vec<u8>>> { ) -> Result<Option<Vec<u8>>> {
// 0. Check the server is in the room // 0. Check the server is in the room
if !services().rooms.metadata.exists(room_id)? { if !services().rooms.metadata.exists(room_id)? {
@ -320,7 +321,7 @@ impl Service {
room_id: &'a RoomId, room_id: &'a RoomId,
mut value: BTreeMap<String, CanonicalJsonValue>, mut value: BTreeMap<String, CanonicalJsonValue>,
auth_events_known: bool, auth_events_known: bool,
pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>, pub_key_map: &'a RwLock<BTreeMap<String, SigningKeys>>,
) -> AsyncRecursiveType< ) -> AsyncRecursiveType<
'a, 'a,
Result<(Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>)>, Result<(Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>)>,
@ -329,12 +330,6 @@ impl Service {
// 1.1. Remove unsigned field // 1.1. Remove unsigned field
value.remove("unsigned"); value.remove("unsigned");
// TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json
// We go through all the signatures we see on the value and fetch
// the corresponding signing keys
self.fetch_required_signing_keys(&value, pub_key_map).await?;
// 2. Check signatures, otherwise drop // 2. Check signatures, otherwise drop
// 3. check content hash, redact if doesn't match // 3. check content hash, redact if doesn't match
let create_event_content: RoomCreateEventContent = let create_event_content: RoomCreateEventContent =
@ -349,9 +344,56 @@ impl Service {
let room_version = RoomVersion::new(room_version_id) let room_version = RoomVersion::new(room_version_id)
.expect("room version is supported"); .expect("room version is supported");
// TODO: For RoomVersion6 we must check that Raw<..> is canonical,
// do we anywhere?
//
// https://matrix.org/docs/spec/rooms/v6#canonical-json
// We go through all the signatures we see on the value and fetch
// the corresponding signing keys
self.fetch_required_signing_keys(&value, pub_key_map).await?;
let origin_server_ts =
value.get("origin_server_ts").ok_or_else(|| {
error!("Invalid PDU, no origin_server_ts field");
Error::BadRequest(
ErrorKind::MissingParam,
"Invalid PDU, no origin_server_ts field",
)
})?;
let origin_server_ts: MilliSecondsSinceUnixEpoch = {
let ts = origin_server_ts.as_integer().ok_or_else(|| {
Error::BadRequest(
ErrorKind::InvalidParam,
"origin_server_ts must be an integer",
)
})?;
MilliSecondsSinceUnixEpoch(i64::from(ts).try_into().map_err(
|_| {
Error::BadRequest(
ErrorKind::InvalidParam,
"Time must be after the unix epoch",
)
},
)?)
};
let guard = pub_key_map.read().await; let guard = pub_key_map.read().await;
let pkey_map = (*guard).clone();
// Removing all the expired keys, unless the room version allows
// stale keys
let filtered_keys = services().globals.filter_keys_server_map(
pkey_map,
origin_server_ts,
room_version_id,
);
let mut val = match ruma::signatures::verify_event( let mut val = match ruma::signatures::verify_event(
&guard, &filtered_keys,
&value, &value,
room_version_id, room_version_id,
) { ) {
@ -526,7 +568,7 @@ impl Service {
create_event: &PduEvent, create_event: &PduEvent,
origin: &ServerName, origin: &ServerName,
room_id: &RoomId, room_id: &RoomId,
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>, pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>,
) -> Result<Option<Vec<u8>>> { ) -> Result<Option<Vec<u8>>> {
// Skip the PDU if we already have it as a timeline event // Skip the PDU if we already have it as a timeline event
if let Ok(Some(pduid)) = if let Ok(Some(pduid)) =
@ -1205,7 +1247,7 @@ impl Service {
create_event: &'a PduEvent, create_event: &'a PduEvent,
room_id: &'a RoomId, room_id: &'a RoomId,
room_version_id: &'a RoomVersionId, room_version_id: &'a RoomVersionId,
pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>, pub_key_map: &'a RwLock<BTreeMap<String, SigningKeys>>,
) -> AsyncRecursiveType< ) -> AsyncRecursiveType<
'a, 'a,
Vec<(Arc<PduEvent>, Option<BTreeMap<String, CanonicalJsonValue>>)>, Vec<(Arc<PduEvent>, Option<BTreeMap<String, CanonicalJsonValue>>)>,
@ -1406,7 +1448,7 @@ impl Service {
create_event: &PduEvent, create_event: &PduEvent,
room_id: &RoomId, room_id: &RoomId,
room_version_id: &RoomVersionId, room_version_id: &RoomVersionId,
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>, pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>,
initial_set: Vec<Arc<EventId>>, initial_set: Vec<Arc<EventId>>,
) -> Result<( ) -> Result<(
Vec<Arc<EventId>>, Vec<Arc<EventId>>,
@ -1509,7 +1551,7 @@ impl Service {
pub(crate) async fn fetch_required_signing_keys( pub(crate) async fn fetch_required_signing_keys(
&self, &self,
event: &BTreeMap<String, CanonicalJsonValue>, event: &BTreeMap<String, CanonicalJsonValue>,
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>, pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>,
) -> Result<()> { ) -> Result<()> {
let signatures = event let signatures = event
.get("signatures") .get("signatures")
@ -1541,6 +1583,7 @@ impl Service {
) )
})?, })?,
signature_ids, signature_ids,
true,
) )
.await; .await;
@ -1570,10 +1613,7 @@ impl Service {
BTreeMap<OwnedServerSigningKeyId, QueryCriteria>, BTreeMap<OwnedServerSigningKeyId, QueryCriteria>,
>, >,
room_version: &RoomVersionId, room_version: &RoomVersionId,
pub_key_map: &mut RwLockWriteGuard< pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap<String, SigningKeys>>,
'_,
BTreeMap<String, BTreeMap<String, Base64>>,
>,
) -> Result<()> { ) -> Result<()> {
let value: CanonicalJsonObject = serde_json::from_str(pdu.get()) let value: CanonicalJsonObject = serde_json::from_str(pdu.get())
.map_err(|e| { .map_err(|e| {
@ -1626,8 +1666,18 @@ impl Service {
let signature_ids = let signature_ids =
signature_object.keys().cloned().collect::<Vec<_>>(); signature_object.keys().cloned().collect::<Vec<_>>();
let contains_all_ids = |keys: &BTreeMap<String, Base64>| { let contains_all_ids = |keys: &SigningKeys| {
signature_ids.iter().all(|id| keys.contains_key(id)) signature_ids.iter().all(|id| {
keys.verify_keys
.keys()
.map(ToString::to_string)
.any(|key_id| id == &key_id)
|| keys
.old_verify_keys
.keys()
.map(ToString::to_string)
.any(|key_id| id == &key_id)
})
}; };
let origin = <&ServerName>::try_from(signature_server.as_str()) let origin = <&ServerName>::try_from(signature_server.as_str())
@ -1646,19 +1696,14 @@ impl Service {
trace!("Loading signing keys for {}", origin); trace!("Loading signing keys for {}", origin);
let result: BTreeMap<_, _> = services() if let Some(result) = services().globals.signing_keys_for(origin)? {
.globals if !contains_all_ids(&result) {
.signing_keys_for(origin)? trace!("Signing key not loaded for {}", origin);
.into_iter() servers.insert(origin.to_owned(), BTreeMap::new());
.map(|(k, v)| (k.to_string(), v.key)) }
.collect();
if !contains_all_ids(&result) { pub_key_map.insert(origin.to_string(), result);
trace!("Signing key not loaded for {}", origin);
servers.insert(origin.to_owned(), BTreeMap::new());
} }
pub_key_map.insert(origin.to_string(), result);
} }
Ok(()) Ok(())
@ -1669,7 +1714,7 @@ impl Service {
&self, &self,
event: &create_join_event::v2::Response, event: &create_join_event::v2::Response,
room_version: &RoomVersionId, room_version: &RoomVersionId,
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>, pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>,
) -> Result<()> { ) -> Result<()> {
let mut servers: BTreeMap< let mut servers: BTreeMap<
OwnedServerName, OwnedServerName,
@ -1741,10 +1786,10 @@ impl Service {
let result = services() let result = services()
.globals .globals
.add_signing_key(&k.server_name, k.clone())? .add_signing_key_from_trusted_server(
.into_iter() &k.server_name,
.map(|(k, v)| (k.to_string(), v.key)) k.clone(),
.collect::<BTreeMap<_, _>>(); )?;
pkm.insert(k.server_name.to_string(), result); pkm.insert(k.server_name.to_string(), result);
} }
@ -1778,12 +1823,9 @@ impl Service {
if let (Ok(get_keys_response), origin) = result { if let (Ok(get_keys_response), origin) = result {
info!("Result is from {origin}"); info!("Result is from {origin}");
if let Ok(key) = get_keys_response.server_key.deserialize() { if let Ok(key) = get_keys_response.server_key.deserialize() {
let result: BTreeMap<_, _> = services() let result = services()
.globals .globals
.add_signing_key(&origin, key)? .add_signing_key_from_origin(&origin, key)?;
.into_iter()
.map(|(k, v)| (k.to_string(), v.key))
.collect();
pub_key_map pub_key_map
.write() .write()
.await .await
@ -1852,9 +1894,22 @@ impl Service {
&self, &self,
origin: &ServerName, origin: &ServerName,
signature_ids: Vec<String>, signature_ids: Vec<String>,
) -> Result<BTreeMap<String, Base64>> { // Whether to ask for keys from trusted servers. Should be false when
let contains_all_ids = |keys: &BTreeMap<String, Base64>| { // getting keys for validating requests, as per MSC4029
signature_ids.iter().all(|id| keys.contains_key(id)) query_via_trusted_servers: bool,
) -> Result<SigningKeys> {
let contains_all_ids = |keys: &SigningKeys| {
signature_ids.iter().all(|id| {
keys.verify_keys
.keys()
.map(ToString::to_string)
.any(|key_id| id == &key_id)
|| keys
.old_verify_keys
.keys()
.map(ToString::to_string)
.any(|key_id| id == &key_id)
})
}; };
let permit = services() let permit = services()
@ -1921,20 +1976,53 @@ impl Service {
trace!("Loading signing keys from database"); trace!("Loading signing keys from database");
let mut result: BTreeMap<_, _> = services() let result = services().globals.signing_keys_for(origin)?;
.globals
.signing_keys_for(origin)?
.into_iter()
.map(|(k, v)| (k.to_string(), v.key))
.collect();
if contains_all_ids(&result) { let mut expires_soon_or_has_expired = false;
return Ok(result);
if let Some(result) = result.clone() {
let ts_threshold = MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(30 * 60),
)
.expect("Should be valid until year 500,000,000");
debug!(
"The threshhold is {:?}, found time is {:?} for server {}",
ts_threshold, result.valid_until_ts, origin
);
if contains_all_ids(&result) {
// We want to ensure that the keys remain valid by the time the
// other functions that handle signatures reach them
if result.valid_until_ts > ts_threshold {
debug!(
origin = %origin,
valid_until_ts = %result.valid_until_ts.get(),
"Keys for are deemed as valid, as they expire after threshold",
);
return Ok(result);
}
expires_soon_or_has_expired = true;
}
} }
let mut keys = result.unwrap_or_else(|| SigningKeys {
verify_keys: BTreeMap::new(),
old_verify_keys: BTreeMap::new(),
valid_until_ts: MilliSecondsSinceUnixEpoch::now(),
});
// We want to set this to the max, and then lower it whenever we see
// older keys
keys.valid_until_ts = MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(7 * 86400),
)
.expect("Should be valid until year 500,000,000");
debug!("Fetching signing keys over federation"); debug!("Fetching signing keys over federation");
if let Some(server_key) = services() if let Some(mut server_key) = services()
.sending .sending
.send_federation_request( .send_federation_request(
origin, origin,
@ -1944,72 +2032,141 @@ impl Service {
.ok() .ok()
.and_then(|resp| resp.server_key.deserialize().ok()) .and_then(|resp| resp.server_key.deserialize().ok())
{ {
services().globals.add_signing_key(origin, server_key.clone())?; // Keys should only be valid for a maximum of seven days
server_key.valid_until_ts = server_key.valid_until_ts.min(
MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now() + Duration::from_secs(7 * 86400),
)
.expect("Should be valid until year 500,000,000"),
);
result.extend( services()
.globals
.add_signing_key_from_origin(origin, server_key.clone())?;
if keys.valid_until_ts > server_key.valid_until_ts {
keys.valid_until_ts = server_key.valid_until_ts;
}
keys.verify_keys.extend(
server_key server_key
.verify_keys .verify_keys
.into_iter() .into_iter()
.map(|(k, v)| (k.to_string(), v.key)), .map(|(id, key)| (id.to_string(), key)),
); );
result.extend( keys.old_verify_keys.extend(
server_key server_key
.old_verify_keys .old_verify_keys
.into_iter() .into_iter()
.map(|(k, v)| (k.to_string(), v.key)), .map(|(id, key)| (id.to_string(), key)),
); );
if contains_all_ids(&result) { if contains_all_ids(&keys) {
return Ok(result); return Ok(keys);
} }
} }
for server in services().globals.trusted_servers() { if query_via_trusted_servers {
debug!(trusted_server = %server, "Asking trusted server for signing keys"); for server in services().globals.trusted_servers() {
if let Some(server_keys) = services() debug!(
.sending trusted_server = %server,
.send_federation_request( origin = %origin,
server, "Asking trusted server for signing keys",
get_remote_server_keys::v2::Request::new( );
origin.to_owned(), if let Some(server_keys) = services()
MilliSecondsSinceUnixEpoch::from_system_time( .sending
SystemTime::now() .send_federation_request(
.checked_add(Duration::from_secs(3600)) server,
.expect("SystemTime to large"), get_remote_server_keys::v2::Request::new(
) origin.to_owned(),
.expect("time is valid"), MilliSecondsSinceUnixEpoch::from_system_time(
), SystemTime::now()
) .checked_add(Duration::from_secs(3600))
.await .expect("SystemTime to large"),
.ok() )
.map(|resp| { .expect("time is valid"),
resp.server_keys ),
.into_iter() )
.filter_map(|e| e.deserialize().ok()) .await
.collect::<Vec<_>>() .ok()
}) .map(|resp| {
{ resp.server_keys
trace!(?server_keys, "Got signing keys from trusted server");
for k in server_keys {
services().globals.add_signing_key(origin, k.clone())?;
result.extend(
k.verify_keys
.into_iter() .into_iter()
.map(|(k, v)| (k.to_string(), v.key)), .filter_map(|e| e.deserialize().ok())
.collect::<Vec<_>>()
})
{
trace!(
?server_keys,
"Got signing keys from trusted server"
); );
result.extend( for mut k in server_keys {
k.old_verify_keys // Half an hour should give plenty of time for the
.into_iter() // server to respond with keys that are still
.map(|(k, v)| (k.to_string(), v.key)), // valid, given we requested keys which are valid at
); // least an hour from now
} let in_half_hour =
MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now()
+ Duration::from_secs(30 * 60),
)
.expect("Should be valid until year 500,000,000");
if k.valid_until_ts < in_half_hour {
// Keys should only be valid for a maximum of seven
// days
k.valid_until_ts = k.valid_until_ts.min(
MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now()
+ Duration::from_secs(7 * 86400),
)
.expect(
"Should be valid until year 500,000,000",
),
);
if contains_all_ids(&result) { if keys.valid_until_ts > k.valid_until_ts {
return Ok(result); keys.valid_until_ts = k.valid_until_ts;
}
services()
.globals
.add_signing_key_from_trusted_server(
origin,
k.clone(),
)?;
keys.verify_keys.extend(
k.verify_keys
.into_iter()
.map(|(id, key)| (id.to_string(), key)),
);
keys.old_verify_keys.extend(
k.old_verify_keys
.into_iter()
.map(|(id, key)| (id.to_string(), key)),
);
} else {
warn!(
origin = %origin,
valid_until = %k.valid_until_ts.get(),
"Server gave us keys older than we \
requested",
);
}
if contains_all_ids(&keys) {
return Ok(keys);
}
}
} }
} }
} }
// We should return these keys if fresher keys were not found
if expires_soon_or_has_expired {
info!(origin = %origin, "Returning stale keys");
return Ok(keys);
}
drop(permit); drop(permit);
back_off(signature_ids).await; back_off(signature_ids).await;

View file

@ -20,7 +20,6 @@ use ruma::{
GlobalAccountDataEventType, StateEventType, TimelineEventType, GlobalAccountDataEventType, StateEventType, TimelineEventType,
}, },
push::{Action, Ruleset, Tweak}, push::{Action, Ruleset, Tweak},
serde::Base64,
state_res::{self, Event, RoomVersion}, state_res::{self, Event, RoomVersion},
uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId,
OwnedEventId, OwnedServerName, RoomId, RoomVersionId, ServerName, UserId, OwnedEventId, OwnedServerName, RoomId, RoomVersionId, ServerName, UserId,
@ -35,6 +34,7 @@ use crate::{
api::server_server, api::server_server,
service::{ service::{
appservice::NamespaceRegex, appservice::NamespaceRegex,
globals::SigningKeys,
pdu::{EventHash, PduBuilder}, pdu::{EventHash, PduBuilder},
}, },
services, utils, Error, PduEvent, Result, services, utils, Error, PduEvent, Result,
@ -1292,7 +1292,7 @@ impl Service {
&self, &self,
origin: &ServerName, origin: &ServerName,
pdu: Box<RawJsonValue>, pdu: Box<RawJsonValue>,
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>, pub_key_map: &RwLock<BTreeMap<String, SigningKeys>>,
) -> Result<()> { ) -> Result<()> {
let (event_id, value, room_id) = let (event_id, value, room_id) =
server_server::parse_incoming_pdu(&pdu)?; server_server::parse_incoming_pdu(&pdu)?;