Mirror of https://gitlab.computer.surgery/matrix/grapevine.git, synced 2025-12-17 15:51:23 +01:00
refactor fetch_unknown_prev_events
Early returns (or continues, in this case) are good.
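
The pattern in one self-contained sketch (illustrative only, not code from this repo; parse_all and its inputs are made up): a `let ... else` binding with an early `continue` replaces an `if let` that would otherwise nest the rest of the loop body.

fn parse_all(inputs: &[&str]) -> Vec<i32> {
    let mut parsed = Vec::new();

    for input in inputs {
        // With `if let Ok(n) = input.parse() { ... }`, everything below
        // would sit one indentation level deeper.
        let Ok(n) = input.parse::<i32>() else {
            // Handle the failure case up front, then move on.
            continue;
        };

        // Further guard clauses keep the happy path flat.
        if n < 0 {
            continue;
        }

        parsed.push(n);
    }

    parsed
}

fn main() {
    // "x" fails to parse and "-1" fails the guard; both are skipped.
    assert_eq!(parse_all(&["3", "x", "-1", "7"]), vec![3, 7]);
}

The diff below applies the same transformation inside fetch_unknown_prev_events: each failure case records the event in `graph` and continues, so the happy path stays at one indentation level.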
parent 6cb7896e17
commit d1370f9834
1 changed file with 43 additions and 44 deletions
@@ -1442,7 +1442,7 @@ impl Service {
         let mut amount = 0;
 
         while let Some(prev_event_id) = todo_outlier_stack.pop() {
-            if let Some((pdu, json_opt)) = self
+            let Some((pdu, json_opt)) = self
                 .fetch_and_handle_outliers(
                     origin,
                     &[prev_event_id.clone()],
@@ -1452,51 +1452,50 @@ impl Service {
                 )
                 .await
                 .pop()
-            {
-                Self::check_room_id(room_id, &pdu)?;
-
-                if amount > services().globals.max_fetch_prev_events() {
-                    // Max limit reached
-                    warn!("Max prev event limit reached!");
-                    graph.insert(prev_event_id.clone(), HashSet::new());
-                    continue;
-                }
-
-                if let Some(json) = json_opt.or_else(|| {
-                    services()
-                        .rooms
-                        .outlier
-                        .get_outlier_pdu_json(&prev_event_id)
-                        .ok()
-                        .flatten()
-                }) {
-                    if pdu.origin_server_ts > first_pdu_in_room.origin_server_ts
-                    {
-                        amount += 1;
-                        for prev_prev in &pdu.prev_events {
-                            if !graph.contains_key(prev_prev) {
-                                todo_outlier_stack.push(prev_prev.clone());
-                            }
-                        }
-
-                        graph.insert(
-                            prev_event_id.clone(),
-                            pdu.prev_events.iter().cloned().collect(),
-                        );
-                    } else {
-                        // Time based check failed
-                        graph.insert(prev_event_id.clone(), HashSet::new());
-                    }
-
-                    eventid_info.insert(prev_event_id.clone(), (pdu, json));
-                } else {
-                    // Get json failed, so this was not fetched over federation
-                    graph.insert(prev_event_id.clone(), HashSet::new());
-                }
-            } else {
-                // Fetch and handle failed
+            else {
                 graph.insert(prev_event_id.clone(), HashSet::new());
+                continue;
+            };
+
+            Self::check_room_id(room_id, &pdu)?;
+
+            if amount > services().globals.max_fetch_prev_events() {
+                warn!("Max prev event limit reached");
+                graph.insert(prev_event_id.clone(), HashSet::new());
+                continue;
             }
+
+            let Some(json) = json_opt.or_else(|| {
+                services()
+                    .rooms
+                    .outlier
+                    .get_outlier_pdu_json(&prev_event_id)
+                    .ok()
+                    .flatten()
+            }) else {
+                graph.insert(prev_event_id.clone(), HashSet::new());
+                continue;
+            };
+
+            if pdu.origin_server_ts <= first_pdu_in_room.origin_server_ts {
+                graph.insert(prev_event_id.clone(), HashSet::new());
+                continue;
+            }
+
+            amount += 1;
+
+            for prev_prev in &pdu.prev_events {
+                if !graph.contains_key(prev_prev) {
+                    todo_outlier_stack.push(prev_prev.clone());
+                }
+            }
+
+            graph.insert(
+                prev_event_id.clone(),
+                pdu.prev_events.iter().cloned().collect(),
+            );
+
+            eventid_info.insert(prev_event_id.clone(), (pdu, json));
         }
 
         let sorted =