limit total events examined in /messages

This commit is contained in:
Benjamin Lee 2024-06-03 10:37:19 -07:00
parent 93ad93a36b
commit 2bcd357db2
No known key found for this signature in database
GPG key ID: FB9624E2885D55A4
2 changed files with 20 additions and 1 deletion

@@ -15,7 +15,7 @@ use ruma::{
use crate::{
service::{pdu::PduBuilder, rooms::timeline::PduCount},
services, utils,
utils::filter::CompiledRoomEventFilter,
utils::filter::{load_limit, CompiledRoomEventFilter},
Ar, Error, Ra, Result,
};
@@ -194,6 +194,7 @@ pub(crate) async fn get_message_events_route(
.rooms
.timeline
.pdus_after(sender_user, &body.room_id, from)?
.take(load_limit(limit))
.filter_map(Result::ok)
.filter(|(_, pdu)| {
services()
@@ -250,6 +251,7 @@ pub(crate) async fn get_message_events_route(
.rooms
.timeline
.pdus_until(sender_user, &body.room_id, from)?
.take(load_limit(limit))
.filter_map(Result::ok)
.filter(|(_, pdu)| {
services()

@@ -18,6 +18,23 @@ use ruma::{api::client::filter::RoomEventFilter, RoomId};
use crate::Error;
// 'DoS' is not a type
#[allow(clippy::doc_markdown)]
/// Returns the total limit of events to examine when evaluating a filter.
///
/// When a filter matches only a very small fraction of available events, we may
/// need to examine a very large number of events before we find enough allowed
/// events to fill the supplied limit. This is a possible DoS vector, and a
/// performance issue for legitimate requests. To avoid this, we put a "load
/// limit" on the total number of events that will be examined. This value is
/// never lower than the original event limit.
pub(crate) fn load_limit(limit: usize) -> usize {
// The 2x limit value was pulled from Synapse, and no real performance
// measurement has been done on our side yet to determine whether it's
// appropriate.
limit.saturating_mul(2)
}
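
The new helper pairs with the `.take(load_limit(limit))` calls added to the /messages handler above: the cap is applied before the filter runs, so a sparse filter can no longer force an unbounded walk of the timeline. The standalone sketch below illustrates the pattern; `Event` and `is_allowed` are hypothetical stand-ins for the real PDU type and compiled filter, and only `load_limit` mirrors the helper added in this commit.

fn load_limit(limit: usize) -> usize {
    // Examine at most twice the requested number of events.
    limit.saturating_mul(2)
}

/// Hypothetical stand-in for a timeline event; the real code works with PDUs.
#[derive(Debug)]
struct Event {
    id: u64,
}

/// Hypothetical sparse filter: pretend only every tenth event is allowed.
fn is_allowed(event: &Event) -> bool {
    event.id % 10 == 0
}

fn main() {
    let limit = 5;

    // Without the load limit, a filter that matches ~1 in 10 events would
    // force us to walk ~50 events to fill a page of 5. With
    // `.take(load_limit(limit))` applied *before* the filter, at most
    // 2 * limit = 10 events are examined, even if that means returning
    // fewer results than requested.
    let page: Vec<Event> = (0..1_000u64)
        .map(|id| Event { id })
        .take(load_limit(limit)) // examine at most 10 events
        .filter(is_allowed)      // sparse filter
        .take(limit)             // never return more than requested
        .collect();

    println!("returned {} of up to {} events: {:?}", page.len(), limit, page);
}

With these numbers, only the event with id 0 falls inside the examination budget, so the sketch prints a single result rather than scanning the whole range; that trade-off (possibly short pages in exchange for a bounded scan) is exactly what the load limit buys.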
/// Structure for testing against an allowlist and a denylist with a single
/// `HashSet` lookup.
///