Compare commits


No commits in common. "main" and "pre-fork" have entirely different histories.

311 changed files with 23721 additions and 28126 deletions
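With unrelated histories, the numbers above are simply a two-tree comparison. Both facts can be reproduced locally (a sketch, assuming `main` and `pre-fork` are both fetched):

```sh
# git merge-base exits nonzero when the branches share no ancestor
git merge-base main pre-fork || echo "no common ancestor"

# Reprints the summary line shown above
git diff --stat main pre-fork | tail -n 1
```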


@@ -1,2 +0,0 @@
[env]
RUMA_UNSTABLE_EXHAUSTIVE_TYPES = "1"

.dockerignore Normal file (28 changed lines)

@@ -0,0 +1,28 @@
# Local build and dev artifacts
target
tests
# Docker files
Dockerfile*
docker-compose*
# IDE files
.vscode
.idea
*.iml
# Git folder
.git
.gitea
.gitlab
.github
# Dot files
.env
.gitignore
# Toml files
rustfmt.toml
# Documentation
#*.md

.editorconfig Normal file (15 changed lines)

@@ -0,0 +1,15 @@
# EditorConfig is awesome: https://EditorConfig.org
root = true
[*]
charset = utf-8
end_of_line = lf
tab_width = 4
indent_size = 4
indent_style = space
insert_final_newline = true
max_line_length = 120
[*.nix]
indent_size = 2

.envrc (14 changed lines)

@@ -1,17 +1,5 @@
#!/usr/bin/env bash
dotenv_if_exists
system="$(nix eval --impure --raw --expr 'builtins.currentSystem')"
devshell="${DIRENV_DEVSHELL:-default}"
if command -v nom &> /dev/null && [ -t 0 ]; then
# if nom is available, build the devshell dependencies with it to get nicer
# progress monitoring. Don't do this when stdout is piped, because it shows
# up weird in engage.
nom build ".#devShells.$system.$devshell"
fi
use flake ".#$devshell"
use flake
PATH_add bin


@@ -0,0 +1 @@
- [ ] I agree to release my code and all other changes of this PR under the Apache-2.0 license

.github/ISSUE_TEMPLATE/Issue.md vendored Normal file (11 changed lines)

@@ -0,0 +1,11 @@
---
name: "Issue with / Feature Request for Conduit"
about: "Please file issues on GitLab: https://gitlab.com/famedly/conduit/-/issues/new"
title: "CLOSE ME"
---
**⚠️ Conduit development does not happen on GitHub. Issues opened here will not be addressed**
Please open issues on GitLab: https://gitlab.com/famedly/conduit/-/issues/new

.gitignore vendored (81 changed lines)

@@ -1,19 +1,76 @@
# Local environment overrides
/.env
# CMake
cmake-build-*/
# Cargo artifacts
target
# IntelliJ
.idea/
out/
*.iml
modules.xml
*.ipr
# mpeltonen/sbt-idea plugin
.idea_modules/
# Linux backup files
*~
# temporary files which can be created if a process still has a handle open of a deleted file
.fuse_hidden*
# KDE directory preferences
.directory
# Linux trash folder which might appear on any partition or disk
.Trash-*
# .nfs files are created when an open file is removed but is still being accessed
.nfs*
# Rust
/target/
### vscode ###
.vscode/*
!.vscode/tasks.json
!.vscode/launch.json
!.vscode/extensions.json
*.code-workspace
### Windows ###
# Windows thumbnail cache files
Thumbs.db
Thumbs.db:encryptable
ehthumbs.db
ehthumbs_vista.db
# Dump file
*.stackdump
# Folder config file
[Dd]esktop.ini
# Recycle Bin used on file shares
$RECYCLE.BIN/
# Windows shortcuts
*.lnk
# Conduit
conduit.toml
conduit.db
# Etc.
**/*.rs.bk
cached_target
# Nix artifacts
/result*
# Direnv cache
/.direnv
# Nix artifacts
result*
# GitLab CI artifacts
# Gitlab CI cache
/.gitlab-ci.d
/grapevine-static-aarch64-unknown-linux-musl
/grapevine-static-x86_64-unknown-linux-musl
# mdbook artifacts
/public
# mdbook output
public/


@@ -1,82 +1,184 @@
stages:
- ci
- artifacts
- deploy
- publish
variables:
# Makes some things print in color
TERM: ansi
before_script:
- |
# Enable nix-command and flakes
echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf
# Disable IFD, to ensure we are able to build without it
echo "allow-import-from-derivation = false" >> /etc/nix/nix.conf
# Add crane binary cache
echo "extra-substituters = https://crane.cachix.org" >> /etc/nix/nix.conf
echo "extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=" >> /etc/nix/nix.conf
# Add nix-community binary cache
echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf
echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf
# Add our own binary cache
if [ -n "$ATTIC_ENDPOINT" ] && [ -n "$ATTIC_CACHE" ] && [ -n "$ATTIC_PUBLIC_KEY" ]; then
echo "extra-substituters = $ATTIC_ENDPOINT/$ATTIC_CACHE" >> /etc/nix/nix.conf
echo "extra-trusted-public-keys = $ATTIC_PUBLIC_KEY" >> /etc/nix/nix.conf
fi
# Install direnv
nix profile install --impure --inputs-from . nixpkgs#direnv
# Allow .envrc
direnv allow
# Set CARGO_HOME to a cacheable path
export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo"
cache-ci-deps:
stage: ci
image: git.lix.systems/lix-project/lix:2.93.3
# Avoid duplicate pipelines
# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines
workflow:
rules:
- if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
script: direnv exec . job cache-ci-deps
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
- if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS
when: never
- if: $CI
before_script:
# Enable nix-command and flakes
- if command -v nix > /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi
# Add our own binary cache
- if command -v nix > /dev/null; then echo "extra-substituters = https://nix.computer.surgery/conduit" >> /etc/nix/nix.conf; fi
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduit:ZGAf6P6LhNvnoJJ3Me3PRg7tlLSrPxcQ2RiE5LIppjo=" >> /etc/nix/nix.conf; fi
# Add alternate binary cache
- if command -v nix > /dev/null && [ -n "$ATTIC_ENDPOINT" ]; then echo "extra-substituters = $ATTIC_ENDPOINT" >> /etc/nix/nix.conf; fi
- if command -v nix > /dev/null && [ -n "$ATTIC_PUBLIC_KEY" ]; then echo "extra-trusted-public-keys = $ATTIC_PUBLIC_KEY" >> /etc/nix/nix.conf; fi
# Add crane binary cache
- if command -v nix > /dev/null; then echo "extra-substituters = https://crane.cachix.org" >> /etc/nix/nix.conf; fi
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=" >> /etc/nix/nix.conf; fi
# Add nix-community binary cache
- if command -v nix > /dev/null; then echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf; fi
- if command -v nix > /dev/null; then echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf; fi
# Install direnv and nix-direnv
- if command -v nix > /dev/null; then nix-env -iA nixpkgs.direnv nixpkgs.nix-direnv; fi
# Allow .envrc
- if command -v nix > /dev/null; then direnv allow; fi
# Set CARGO_HOME to a cacheable path
- export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo"
ci:
stage: ci
image: git.lix.systems/lix-project/lix:2.93.3
rules:
- if: $CI_PIPELINE_SOURCE == 'merge_request_event'
image: nixos/nix:2.20.4
script:
- direnv exec . job ci
# Cache the inputs required for the devShell
- ./bin/nix-build-and-cache .#devShells.x86_64-linux.default.inputDerivation
- direnv exec . engage
cache:
key: nix
paths:
- target
- .gitlab-ci.d
rules:
# CI on upstream runners (only available for maintainers)
- if: $CI_PIPELINE_SOURCE == "merge_request_event" && $IS_UPSTREAM_CI == "true"
# Manual CI on unprotected branches that are not MRs
- if: $CI_PIPELINE_SOURCE != "merge_request_event" && $CI_COMMIT_REF_PROTECTED == "false"
when: manual
# Manual CI on forks
- if: $IS_UPSTREAM_CI != "true"
when: manual
- if: $CI
interruptible: true
artifacts:
stage: artifacts
image: git.lix.systems/lix-project/lix:2.93.3
rules:
- if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
- if: $CI_PIPELINE_SOURCE == 'merge_request_event'
image: nixos/nix:2.20.4
script:
- direnv exec . job artifacts
- ./bin/nix-build-and-cache .#static-x86_64-unknown-linux-musl
- cp result/bin/conduit x86_64-unknown-linux-musl
- mkdir -p target/release
- cp result/bin/conduit target/release
- direnv exec . cargo deb --no-build
- mv target/debian/*.deb x86_64-unknown-linux-musl.deb
# Since the OCI image package is based on the binary package, this has the
# fun side effect of uploading the normal binary too. Conduit users who are
# deploying with Nix can leverage this fact by adding our binary cache to
# their systems.
#
# Note that although we have an `oci-image-x86_64-unknown-linux-musl`
# output, we don't build it because it would be largely redundant to this
# one since it's all containerized anyway.
- ./bin/nix-build-and-cache .#oci-image
- cp result oci-image-amd64.tar.gz
- ./bin/nix-build-and-cache .#static-aarch64-unknown-linux-musl
- cp result/bin/conduit aarch64-unknown-linux-musl
- ./bin/nix-build-and-cache .#oci-image-aarch64-unknown-linux-musl
- cp result oci-image-arm64v8.tar.gz
- ./bin/nix-build-and-cache .#book
# We can't just copy the symlink; we need to dereference it. See https://gitlab.com/gitlab-org/gitlab/-/issues/19746
- cp -r --dereference result public
artifacts:
paths:
- grapevine-static-aarch64-unknown-linux-musl
- grapevine-static-x86_64-unknown-linux-musl
- x86_64-unknown-linux-musl
- aarch64-unknown-linux-musl
- x86_64-unknown-linux-musl.deb
- oci-image-amd64.tar.gz
- oci-image-arm64v8.tar.gz
- public
rules:
# CI required for all MRs
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
# Optional CI on forks
- if: $IS_UPSTREAM_CI != "true"
when: manual
allow_failure: true
- if: $CI
interruptible: true
.push-oci-image:
stage: publish
image: docker:25.0.0
services:
- docker:25.0.0-dind
variables:
IMAGE_SUFFIX_AMD64: amd64
IMAGE_SUFFIX_ARM64V8: arm64v8
script:
- docker load -i oci-image-amd64.tar.gz
- IMAGE_ID_AMD64=$(docker images -q conduit:next)
- docker load -i oci-image-arm64v8.tar.gz
- IMAGE_ID_ARM64V8=$(docker images -q conduit:next)
# Tag and push the architecture specific images
- docker tag $IMAGE_ID_AMD64 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64
- docker tag $IMAGE_ID_ARM64V8 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
- docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64
- docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
# Tag the multi-arch image
- docker manifest create $IMAGE_NAME:$CI_COMMIT_SHA --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
- docker manifest push $IMAGE_NAME:$CI_COMMIT_SHA
# Tag and push the git ref
- docker manifest create $IMAGE_NAME:$CI_COMMIT_REF_NAME --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
- docker manifest push $IMAGE_NAME:$CI_COMMIT_REF_NAME
# Tag git tags as 'latest'
- |
if [[ -n "$CI_COMMIT_TAG" ]]; then
docker manifest create $IMAGE_NAME:latest --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8
docker manifest push $IMAGE_NAME:latest
fi
dependencies:
- artifacts
only:
- next
- master
- tags
oci-image:push-gitlab:
extends: .push-oci-image
variables:
IMAGE_NAME: $CI_REGISTRY_IMAGE/matrix-conduit
before_script:
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
oci-image:push-dockerhub:
extends: .push-oci-image
variables:
IMAGE_NAME: matrixconduit/matrix-conduit
before_script:
- docker login -u $DOCKER_HUB_USER -p $DOCKER_HUB_PASSWORD
pages:
stage: deploy
image: git.lix.systems/lix-project/lix:2.93.3
rules:
- if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
stage: publish
dependencies:
- artifacts
only:
- next
script:
- direnv exec . job pages
- "true"
artifacts:
paths:
- public

.gitlab/CODEOWNERS Normal file (5 changed lines)

@@ -0,0 +1,5 @@
# Nix things
.envrc @CobaltCause
flake.lock @CobaltCause
flake.nix @CobaltCause
nix/ @CobaltCause


@@ -0,0 +1,19 @@
<!--
If you're requesting a new feature that isn't part of this project yet,
then please consider filling out a "Feature Request" instead!
If you need a hand setting up your conduit server, feel free to ask for help in the
Conduit Matrix Chat: https://matrix.to/#/#conduit:fachschaften.org.
-->
### Description
<!-- What did you do and what happened? Why is that bad? -->
### System Configuration
<!-- Other data that might help us debug this issue, like OS, Conduit version, database backend -->
Conduit Version:
Database backend (default is sqlite): sqlite
/label ~conduit


@@ -0,0 +1,17 @@
<!--
If you want to report a bug or an error,
then please consider filling out a "Bug Report" instead!
-->
### Is your feature request related to a problem? Please describe.
<!-- E.g. I'm always frustrated when [...] -->
### Describe the solution you'd like
/label ~conduit


@@ -0,0 +1,8 @@
<!-- Please describe your changes here -->
-----------------------------------------------------------------------------
- [ ] I ran `cargo fmt` and `cargo test`
- [ ] I agree to release my code and all other changes of this MR under the Apache-2.0 license

.gitlab/route-map.yml Normal file (3 changed lines)

@@ -0,0 +1,3 @@
# Docs: Map markdown to html files
- source: /docs/(.+)\.md/
public: '\1.html'


@@ -0,0 +1,37 @@
#!/bin/sh
set -eux
# --------------------------------------------------------------------- #
# #
# Configures docker buildx to use a remote server for arm building. #
# Expects $BUILD_SERVER_SSH_PRIVATE_KEY to be a valid ssh ed25519 private key with #
# access to the server $ARM_SERVER_USER@$ARM_SERVER_IP #
# #
# This is expected to only be used in the official CI/CD pipeline! #
# #
# Requirements: openssh-client, docker buildx #
# Inspired by: https://depot.dev/blog/building-arm-containers #
# #
# --------------------------------------------------------------------- #
cat "$BUILD_SERVER_SSH_PRIVATE_KEY" | ssh-add -
# Test server connections:
ssh "$ARM_SERVER_USER@$ARM_SERVER_IP" "uname -a"
ssh "$AMD_SERVER_USER@$AMD_SERVER_IP" "uname -a"
# Connect remote arm64 server for all arm builds:
docker buildx create \
--name "multi" \
--driver "docker-container" \
--platform "linux/arm64,linux/arm/v7" \
"ssh://$ARM_SERVER_USER@$ARM_SERVER_IP"
# Connect remote amd64 server for amd64 builds:
docker buildx create --append \
--name "multi" \
--driver "docker-container" \
--platform "linux/amd64" \
"ssh://$AMD_SERVER_USER@$AMD_SERVER_IP"
docker buildx use multi


@@ -1 +0,0 @@
.gitignore


@@ -1 +0,0 @@
Olivia Lee <olivia@computer.surgery> <benjamin@computer.surgery>


@@ -1 +0,0 @@
.gitignore

.vscode/extensions.json vendored Normal file (11 changed lines)

@@ -0,0 +1,11 @@
{
"recommendations": [
"rust-lang.rust-analyzer",
"bungcip.better-toml",
"ms-azuretools.vscode-docker",
"eamodio.gitlens",
"serayuzgur.crates",
"vadimcn.vscode-lldb",
"timonwong.shellcheck"
]
}

.vscode/launch.json vendored Normal file (35 changed lines)

@@ -0,0 +1,35 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"type": "lldb",
"request": "launch",
"name": "Debug conduit",
"sourceLanguages": ["rust"],
"cargo": {
"args": [
"build",
"--bin=conduit",
"--package=conduit"
],
"filter": {
"name": "conduit",
"kind": "bin"
}
},
"args": [],
"env": {
"RUST_BACKTRACE": "1",
"CONDUIT_CONFIG": "",
"CONDUIT_SERVER_NAME": "localhost",
"CONDUIT_DATABASE_PATH": "/tmp",
"CONDUIT_ADDRESS": "0.0.0.0",
"CONDUIT_PORT": "6167"
},
"cwd": "${workspaceFolder}"
}
]
}

CODE_OF_CONDUCT.md Normal file (134 changed lines)

@@ -0,0 +1,134 @@
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall
community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or advances of
any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address,
without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement over email at
coc@koesters.xyz or over Matrix at @timo:conduit.rs.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series of
actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or permanent
ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within the
community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.1, available at
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
For answers to common questions about this code of conduct, see the FAQ at
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
[https://www.contributor-covenant.org/translations][translations].
[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations

Cargo.lock generated (2985 changed lines)

File diff suppressed because it is too large


@@ -1,194 +1,188 @@
# Keep alphabetically sorted
[workspace.lints.rust]
elided_lifetimes_in_paths = "warn"
explicit_outlives_requirements = "warn"
macro_use_extern_crate = "warn"
missing_abi = "warn"
noop_method_call = "warn"
single_use_lifetimes = "warn"
unreachable_pub = "warn"
unsafe_op_in_unsafe_fn = "warn"
unused_extern_crates = "warn"
unused_import_braces = "warn"
unused_lifetimes = "warn"
unused_macro_rules = "warn"
unused_qualifications = "warn"
# Keep alphabetically sorted
[workspace.lints.clippy]
# Groups. Keep alphabetically sorted
pedantic = { level = "warn", priority = -1 }
# Lints. Keep alphabetically sorted
as_conversions = "warn"
assertions_on_result_states = "warn"
cloned_instead_of_copied = "warn"
dbg_macro = "warn"
default_union_representation = "warn"
deref_by_slicing = "warn"
empty_drop = "warn"
empty_structs_with_brackets = "warn"
error_impl_error = "warn"
filetype_is_file = "warn"
float_cmp_const = "warn"
format_push_string = "warn"
get_unwrap = "warn"
if_then_some_else_none = "warn"
impl_trait_in_params = "warn"
let_underscore_must_use = "warn"
lossy_float_literal = "warn"
mem_forget = "warn"
missing_assert_message = "warn"
mod_module_files = "warn"
multiple_inherent_impl = "warn"
mutex_atomic = "warn"
negative_feature_names = "warn"
non_zero_suggestions = "warn"
pub_without_shorthand = "warn"
rc_buffer = "warn"
rc_mutex = "warn"
redundant_feature_names = "warn"
redundant_type_annotations = "warn"
ref_patterns = "warn"
rest_pat_in_fully_bound_structs = "warn"
same_name_method = "warn"
semicolon_inside_block = "warn"
str_to_string = "warn"
string_add = "warn"
string_lit_chars_any = "warn"
string_slice = "warn"
string_to_string = "warn"
suspicious_xor_used_as_pow = "warn"
tests_outside_test_module = "warn"
try_err = "warn"
undocumented_unsafe_blocks = "warn"
unnecessary_safety_comment = "warn"
unnecessary_safety_doc = "warn"
unnecessary_self_imports = "warn"
unneeded_field_pattern = "warn"
unseparated_literal_suffix = "warn"
unused_result_ok = "warn"
verbose_file_reads = "warn"
wildcard_dependencies = "warn"
# TODO: Remove these:
missing_errors_doc = "allow"
missing_panics_doc = "allow"
[package]
name = "grapevine"
name = "conduit"
description = "A Matrix homeserver written in Rust"
license = "Apache-2.0"
version = "0.1.0"
authors = ["timokoesters <timo@koesters.xyz>"]
homepage = "https://conduit.rs"
repository = "https://gitlab.com/famedly/conduit"
readme = "README.md"
version = "0.7.0"
edition = "2021"
# See also `rust-toolchain.toml`
rust-version = "1.85.0"
rust-version = "1.75.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lints]
workspace = true
# Keep sorted
[dependencies]
argon2 = "0.5.3"
async-trait = "0.1.88"
axum = { version = "0.7.9", default-features = false, features = ["form", "http1", "http2", "json", "matched-path", "tokio", "tracing"] }
axum-extra = { version = "0.9.6", features = ["typed-header"] }
axum-server = { git = "https://gitlab.computer.surgery/matrix/thirdparty/axum-server.git", rev = "v0.7.2+grapevine-1", version = "0.7.2", features = ["tls-rustls-no-provider"] }
base64 = "0.22.1"
bytes = "1.10.1"
clap = { version = "4.5.42", default-features = false, features = ["std", "derive", "help", "usage", "error-context", "string", "wrap_help"] }
futures-util = { version = "0.3.31", default-features = false }
hmac = "0.12.1"
html-escape = "0.2.13"
http = "1.3.1"
http-body-util = "0.1.3"
hyper = "1.6.0"
image = { version = "0.25.6", default-features = false, features = ["jpeg", "png", "gif"] }
jsonwebtoken = "9.3.1"
lru-cache = "0.1.2"
num_cpus = "1.17.0"
opentelemetry = "0.24.0"
opentelemetry-jaeger-propagator = "0.3.0"
opentelemetry-otlp = "0.17.0"
opentelemetry-prometheus = "0.17.0"
opentelemetry_sdk = { version = "0.24.1", features = ["rt-tokio"] }
parking_lot = { version = "0.12.4", optional = true }
phf = { version = "0.11.3", features = ["macros"] }
pin-project-lite = "0.2.16"
prometheus = "0.13.4"
proxy-header = { version = "0.1.2", features = ["tokio"] }
# Web framework
axum = { version = "0.6.18", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true }
axum-server = { version = "0.5.1", features = ["tls-rustls"] }
tower = { version = "0.4.13", features = ["util"] }
tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitive-headers", "trace", "util"] }
# Used for matrix spec type definitions and helpers
#ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
ruma = { git = "https://github.com/ruma/ruma", rev = "5495b85aa311c2805302edb0a7de40399e22b397", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
#ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
# Async runtime and utilities
tokio = { version = "1.28.1", features = ["fs", "macros", "signal", "sync"] }
# Used for storing data permanently
#sled = { version = "0.34.7", features = ["compression", "no_metrics"], optional = true }
#sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] }
persy = { version = "1.4.4", optional = true, features = ["background_ops"] }
# Used for the http request / response body type for Ruma endpoints used with reqwest
bytes = "1.4.0"
http = "0.2.9"
# Used to find data directory for default db path
directories = "4.0.1"
# Used for ruma wrapper
serde_json = { version = "1.0.96", features = ["raw_value"] }
# Used for appservice registration files
serde_yaml = "0.9.21"
# Used for pdu definition
serde = { version = "1.0.163", features = ["rc"] }
# Used for secure identifiers
rand = "0.8.5"
regex = "1.11.1"
reqwest = { version = "0.12.22", default-features = false, features = ["http2", "rustls-tls-native-roots", "socks"] }
ring = "0.17.14"
rocksdb = { package = "rust-rocksdb", version = "0.42.1", features = ["lz4", "multi-threaded-cf", "zstd"], optional = true }
rusqlite = { version = "0.34.0", optional = true, features = ["bundled"] }
rustls = { version = "0.23.31", default-features = false, features = ["ring", "log", "logging", "std", "tls12"] }
sd-notify = { version = "0.4.5", optional = true }
serde = { version = "1.0.219", features = ["rc"] }
serde_html_form = "0.2.7"
serde_json = { version = "1.0.141", features = ["raw_value"] }
serde_yaml = "0.9.34"
sha-1 = "0.10.1"
strum = { version = "0.27.2", features = ["derive"] }
thiserror = "2.0.12"
thread_local = "1.1.9"
tokio = { version = "1.47.0", features = ["fs", "macros", "signal", "sync"] }
toml = "0.8.23"
tower = { version = "0.5.2", features = ["util"] }
tower-http = { version = "0.6.6", features = ["add-extension", "cors", "sensitive-headers", "trace", "util"] }
tracing = { version = "0.1.41", features = [] }
# Used to hash passwords
rust-argon2 = "1.0.0"
# Used to send requests
hyper = "0.14.26"
reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls-native-roots", "socks"] }
# Used for conduit::Error type
thiserror = "1.0.40"
# Used to generate thumbnails for images
image = { version = "0.24.6", default-features = false, features = ["jpeg", "png", "gif"] }
# Used to encode server public key
base64 = "0.21.2"
# Used when hashing the state
ring = "0.17.7"
# Used when querying the SRV record of other servers
trust-dns-resolver = "0.22.0"
# Used to find matching events for appservices
regex = "1.8.1"
# jwt jsonwebtokens
jsonwebtoken = "9.2.0"
# Performance measurements
tracing = { version = "0.1.37", features = [] }
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
tracing-flame = "0.2.0"
tracing-opentelemetry = "0.25.0"
tracing-subscriber = { version = "0.3.19", features = ["env-filter", "json"] }
trust-dns-resolver = "0.23.2"
xdg = "2.5.2"
opentelemetry = { version = "0.18.0", features = ["rt-tokio"] }
opentelemetry-jaeger = { version = "0.17.0", features = ["rt-tokio"] }
tracing-opentelemetry = "0.18.0"
lru-cache = "0.1.2"
rusqlite = { version = "0.29.0", optional = true, features = ["bundled"] }
parking_lot = { version = "0.12.1", optional = true }
# crossbeam = { version = "0.8.2", optional = true }
num_cpus = "1.15.0"
threadpool = "1.8.1"
# heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
# Used for ruma wrapper
serde_html_form = "0.2.0"
[dependencies.ruma]
git = "https://github.com/ruma/ruma.git"
rev = "c4f467781a7ef330dc0b7eb5d0d0cad77ebc3337"
thread_local = "1.1.7"
# used for TURN server authentication
hmac = "0.12.1"
sha-1 = "0.10.1"
# used for conduit's CLI and admin room command parsing
clap = { version = "4.3.0", default-features = false, features = ["std", "derive", "help", "usage", "error-context", "string"] }
futures-util = { version = "0.3.28", default-features = false }
# Used for reading the configuration from conduit.toml & environment variables
figment = { version = "0.10.8", features = ["env", "toml"] }
tikv-jemalloc-ctl = { version = "0.5.0", features = ["use_std"], optional = true }
tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_supported_platforms"], optional = true }
lazy_static = "1.4.0"
async-trait = "0.1.68"
sd-notify = { version = "0.4.1", optional = true }
[dependencies.rocksdb]
package = "rust-rocksdb"
version = "0.24.0"
optional = true
features = [
"compat-server-signing-key-version",
"compat-empty-string-null",
"compat-null",
"compat-optional",
"compat-unset-avatar",
"compat-get-3pids",
"compat-tag-info",
"compat-optional-txn-pdus",
"rand",
"appservice-api-c",
"client-api",
"federation-api",
"push-gateway-api-c",
"state-res",
"unstable-msc2448",
"ring-compat",
"multi-threaded-cf",
"zstd",
"lz4",
]
[target.'cfg(unix)'.dependencies]
nix = { version = "0.29", features = ["resource", "time"] }
[dev-dependencies]
assert_cmd = "2.0.17"
insta = { version = "1.43.1", features = ["filters", "json", "redactions"] }
predicates = "3.1.3"
tempfile = "3.20.0"
[profile.dev.package.insta]
opt-level = 3
[profile.dev.package.similar]
opt-level = 3
nix = { version = "0.28", features = ["resource"] }
[features]
default = ["rocksdb", "sqlite", "systemd"]
default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "systemd"]
#backend_sled = ["sled"]
backend_persy = ["persy", "parking_lot"]
backend_sqlite = ["sqlite"]
#backend_heed = ["heed", "crossbeam"]
backend_rocksdb = ["rocksdb"]
jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"]
sqlite = ["rusqlite", "parking_lot", "tokio/signal"]
conduit_bin = ["axum"]
systemd = ["sd-notify"]
# Keep sorted
rocksdb = ["dep:rocksdb"]
sqlite = ["dep:rusqlite", "dep:parking_lot", "tokio/signal"]
systemd = ["dep:sd-notify"]
[[bin]]
name = "conduit"
path = "src/main.rs"
required-features = ["conduit_bin"]
[profile.release-debug]
inherits = "release"
debug = true
[lib]
name = "conduit"
path = "src/lib.rs"
[package.metadata.deb]
name = "matrix-conduit"
maintainer = "Paul van Tilburg <paul@luon.net>"
copyright = "2020, Timo Kösters <timo@koesters.xyz>"
license-file = ["LICENSE", "3"]
depends = "$auto, ca-certificates"
extended-description = """\
A fast Matrix homeserver that is optimized for smaller, personal servers, \
instead of a server that has high scalability."""
section = "net"
priority = "optional"
assets = [
["debian/README.md", "usr/share/doc/matrix-conduit/README.Debian", "644"],
["README.md", "usr/share/doc/matrix-conduit/", "644"],
["target/release/conduit", "usr/sbin/matrix-conduit", "755"],
]
conf-files = [
"/etc/matrix-conduit/conduit.toml"
]
maintainer-scripts = "debian/"
systemd-units = { unit-name = "matrix-conduit" }
[profile.dev]
lto = 'off'
incremental = true
[profile.release]
lto = 'thin'
incremental = true
codegen-units=32
# If you want to make flamegraphs, enable debug info:
# debug = true
# For releases also try to max optimizations for dependencies:
[profile.release.build-override]
opt-level = 3
[profile.release.package."*"]
opt-level = 3

LICENSE Normal file (176 changed lines)

@@ -0,0 +1,176 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS


@@ -1,172 +0,0 @@
# Apache License
Version 2.0, January 2004, <http://www.apache.org/licenses/>
## TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
### 1. Definitions
* **"License"** shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
* **"Licensor"** shall mean the copyright owner or entity authorized by the
copyright owner that is granting the License.
* **"Legal Entity"** shall mean the union of the acting entity and all other
entities that control, are controlled by, or are under common control with
that entity. For the purposes of this definition, "control" means (i) the
power, direct or indirect, to cause the direction or management of such
entity, whether by contract or otherwise, or (ii) ownership of fifty percent
(50%) or more of the outstanding shares, or (iii) beneficial ownership of such
entity.
* **"You"** (or **"Your"**) shall mean an individual or Legal Entity exercising
permissions granted by this License.
* **"Source"** form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation source, and
configuration files.
* **"Object"** form shall mean any form resulting from mechanical transformation
or translation of a Source form, including but not limited to compiled object
code, generated documentation, and conversions to other media types.
* **"Work"** shall mean the work of authorship, whether in Source or Object
form, made available under the License, as indicated by a copyright notice
that is included in or attached to the work (an example is provided in the
Appendix below).
* **"Derivative Works"** shall mean any work, whether in Source or Object form,
that is based on (or derived from) the Work and for which the editorial
revisions, annotations, elaborations, or other modifications represent, as
a whole, an original work of authorship. For the purposes of this License,
Derivative Works shall not include works that remain separable from, or merely
link (or bind by name) to the interfaces of, the Work and Derivative Works
thereof.
* **"Contribution"** shall mean any work of authorship, including the original
version of the Work and any modifications or additions to that Work or
Derivative Works thereof, that is intentionally submitted to Licensor for
inclusion in the Work by the copyright owner or by an individual or Legal
Entity authorized to submit on behalf of the copyright owner. For the purposes
of this definition, "submitted" means any form of electronic, verbal, or
written communication sent to the Licensor or its representatives, including
but not limited to communication on electronic mailing lists, source code
control systems, and issue tracking systems that are managed by, or on behalf
of, the Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise designated
in writing by the copyright owner as "Not a Contribution."
* **"Contributor"** shall mean Licensor and any individual or Legal Entity on
behalf of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
### 2. Grant of Copyright License
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
### 3. Grant of Patent License
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including
a cross-claim or counterclaim in a lawsuit) alleging that the Work or
a Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
### 4. Redistribution
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
1. You must give any other recipients of the Work or Derivative Works a copy of
this License; and
2. You must cause any modified files to carry prominent notices stating that You
changed the files; and
3. You must retain, in the Source form of any Derivative Works that You
distribute, all copyright, patent, trademark, and attribution notices from
the Source form of the Work, excluding those notices that do not pertain to
any part of the Derivative Works; and
4. If the Work includes a "NOTICE" text file as part of its distribution, then
any Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those
notices that do not pertain to any part of the Derivative Works, in at least
one of the following places: within a NOTICE text file distributed as part of
the Derivative Works; within the Source form or documentation, if provided
along with the Derivative Works; or, within a display generated by the
Derivative Works, if and wherever such third-party notices normally appear.
The contents of the NOTICE file are for informational purposes only and do
not modify the License. You may add Your own attribution notices within
Derivative Works that You distribute, alongside or as an addendum to the
NOTICE text from the Work, provided that such additional attribution notices
cannot be construed as modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
### 5. Submission of Contributions
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
### 6. Trademarks
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
### 7. Disclaimer of Warranty
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
### 8. Limitation of Liability
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
### 9. Accepting Warranty or Additional Liability
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.


@@ -1,9 +1,77 @@
# Grapevine
# Conduit
A Matrix homeserver.
<!-- ANCHOR: catchphrase -->
### A Matrix homeserver written in Rust
<!-- ANCHOR_END: catchphrase -->
## Read the book
Please visit the [Conduit documentation](https://famedly.gitlab.io/conduit) for more information.
Alternatively you can open [docs/introduction.md](docs/introduction.md) in this repository.
[Click here to read the latest version.][0]
<!-- ANCHOR: body -->
#### What is Matrix?
[0]: https://grapevine.computer.surgery/
[Matrix](https://matrix.org) is an open network for secure and decentralized
communication. Users from every Matrix homeserver can chat with users from all
other Matrix servers. You can even use bridges (also called Matrix appservices)
to communicate with users outside of Matrix, like a community on Discord.
#### What is the goal?
An efficient Matrix homeserver that's easy to set up and just works. You can install
it on a mini-computer like the Raspberry Pi to host Matrix for your family,
friends or company.
#### Can I try it out?
Yes! You can test our Conduit instance by opening a client that supports registration tokens such as [Element web](https://app.element.io/), [Nheko](https://matrix.org/ecosystem/clients/nheko/) or [SchildiChat web](https://app.schildi.chat/) and registering on the `conduit.rs` homeserver. The registration token is "for_testing_only". Don't share personal information. Once you have registered, you can use any other [Matrix client](https://matrix.org/ecosystem/clients) to login.
Server hosting for conduit.rs is donated by the Matrix.org Foundation.
#### What is the current status?
Conduit is Beta, meaning you can join and participate in most
Matrix rooms, but not all features are supported and you might run into bugs
from time to time.
There are still a few important features missing:
- E2EE emoji comparison over federation (E2EE chat works)
- Outgoing read receipts, typing, presence over federation (incoming works)
<!-- ANCHOR_END: body -->
<!-- ANCHOR: footer -->
#### How can I contribute?
1. Look for an issue you would like to work on and make sure no one else is currently working on it.
2. Tell us that you are working on the issue (comment on the issue or chat in
[#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org)). If it is more complicated, please explain your approach and ask questions.
3. Fork the repo, create a new branch and push commits.
4. Submit an MR
#### Contact
If you have any questions, feel free to
- Ask in `#conduit:fachschaften.org` on Matrix
- Write an email to `conduit@koesters.xyz`
- Send a direct message to `@timokoesters:fachschaften.org` on Matrix
- [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new)
#### Thanks to
Thanks to FUTO, Famedly, Prototype Fund (DLR and German BMBF) and all individuals for financially supporting this project.
Thanks to the contributors to Conduit and all libraries we use, for example:
- Ruma: A clean library for the Matrix Spec in Rust
- axum: A modular web framework
#### Donate
- Liberapay: <https://liberapay.com/timokoesters/>
- Bitcoin: `bc1qnnykf986tw49ur7wx9rpw2tevpsztvar5x8w4n`
#### Logo
- Lightning Bolt Logo: <https://github.com/mozilla/fxemoji/blob/gh-pages/svgs/nature/u26A1-bolt.svg>
- Logo License: <https://github.com/mozilla/fxemoji/blob/gh-pages/LICENSE.md>
<!-- ANCHOR_END: footer -->
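An aside on the registration token mentioned in "Can I try it out?": this is standard Matrix user-interactive auth, so the flows a server advertises can be inspected with any HTTP client. A sketch (the endpoint comes from the Matrix spec, not this repository):

```sh
# An empty /register request returns HTTP 401 plus the supported auth
# flows; a server with registration tokens enabled should include
# m.login.registration_token among them.
curl -s -X POST https://conduit.rs/_matrix/client/v3/register \
  -H 'Content-Type: application/json' \
  -d '{}'
```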

bin/complement Executable file (37 changed lines)

@@ -0,0 +1,37 @@
#!/usr/bin/env bash
set -euo pipefail
# Path to Complement's source code
COMPLEMENT_SRC="$1"
# A `.jsonl` file to write test logs to
LOG_FILE="$2"
# A `.jsonl` file to write test results to
RESULTS_FILE="$3"
OCI_IMAGE="complement-conduit:dev"
env \
-C "$(git rev-parse --show-toplevel)" \
docker build \
--tag "$OCI_IMAGE" \
--file complement/Dockerfile \
.
# It's okay (likely, even) that `go test` exits nonzero
set +o pipefail
env \
-C "$COMPLEMENT_SRC" \
COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \
go test -json ./tests | tee "$LOG_FILE"
set -o pipefail
# Post-process the results into an easy-to-compare format
cat "$LOG_FILE" | jq -c '
select(
(.Action == "pass" or .Action == "fail" or .Action == "skip")
and .Test != null
) | {Action: .Action, Test: .Test}
' | sort > "$RESULTS_FILE"
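Because the results file is normalized to sorted, one-object-per-line JSON, two Complement runs can be compared with plain `diff`. A usage sketch (the file paths are hypothetical):

```sh
./bin/complement ~/src/complement before.log.jsonl before.results.jsonl
# ...make changes, rebuild, rerun...
./bin/complement ~/src/complement after.log.jsonl after.results.jsonl
diff before.results.jsonl after.results.jsonl
```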

bin/job (131 changed lines)

@@ -1,131 +0,0 @@
#!/usr/bin/env bash
job_artifacts() (
set -euo pipefail
nix-build-and-cache packages
# Subsequent `nix build` calls should be fast because the above line ensures
# the packages have been built already.
packages=(
static-aarch64-unknown-linux-musl
static-x86_64-unknown-linux-musl
)
for x in "${packages[@]}"; do
nix build ".#$x"
cp result/bin/grapevine grapevine-"$x"
chmod u+w grapevine-"$x"
done
)
job_ci() (
set -euo pipefail
nix-build-and-cache ci
direnv exec . engage
)
job_pages() (
set -euo pipefail
nix build .#website-root
cp --recursive --dereference result public
chmod u+w -R public
)
job_cache-ci-deps() (
set -euo pipefail
nix-build-and-cache ci
)
bail() (
set -euo pipefail
git show --shortstat
echo
echo "Failure caused by the above commit"
exit 1
)
mark_commit_passed() (
set -euo pipefail
mkdir -p ".gitlab-ci.d/passed/$1"
touch ".gitlab-ci.d/passed/$1/$(git rev-parse HEAD)"
)
commit_passed() (
set -euo pipefail
[[ -f ".gitlab-ci.d/passed/$1/$(git rev-parse HEAD)" ]]
)
contains() (
    set -euo pipefail
    local -n xs=$1
    for x in "${xs[@]}"; do
        if [[ "$x" == "$2" ]]; then
            return 0
        fi
    done
    return 1
)
# Jobs that should only run on the latest commit rather than since the branch's
# base.
last_commit_only=(
artifacts
)
run() (
set -euo pipefail
if [[ -z "${1+x}" ]]; then
echo "You must supply a job to run. Available jobs:"
declare -F | rg \
--only-matching \
--color never \
--replace '* $1' \
'^declare -f job_(.*)$'
exit 1
fi
job="$1"
cd "$(git rev-parse --show-toplevel)"
if \
[[ -z "${CI_MERGE_REQUEST_DIFF_BASE_SHA+x}" ]] \
|| contains last_commit_only "$job"
then
echo "Running against latest commit only..."
job_"$job" || bail
else
echo "Running against all commits since this branch's base..."
readarray -t commits < \
<(git rev-list --reverse "$CI_MERGE_REQUEST_DIFF_BASE_SHA..HEAD")
for commit in "${commits[@]}"; do
git checkout "$commit"
if commit_passed "$job"; then
echo "Skipping commit because it already passed: $commit"
continue
fi
job_"$job" || bail
mark_commit_passed "$job"
done
fi
)
run "$@"

View file

@ -2,69 +2,25 @@
set -euo pipefail
# Build and cache the specified arguments
just() {
if command -v nom &> /dev/null; then
nom build "$@"
else
nix build "$@"
fi
# The first argument must be the desired installable
INSTALLABLE="$1"
# Find all output paths of the installables and their build dependencies
readarray -t derivations < <(nix path-info --derivation "$@")
readarray -t upload_paths < <(
xargs \
nix-store --query --requisites --include-outputs \
<<< "${derivations[*]}"
)
# Build the installable and forward any other arguments too
nix build "$@"
echo "Found ${#upload_paths[@]} paths to upload"
if [ ! -z ${ATTIC_TOKEN+x} ]; then
nix run --inputs-from . attic -- \
login \
conduit \
"${ATTIC_ENDPOINT:-https://nix.computer.surgery/conduit}" \
"$ATTIC_TOKEN"
if [ -z ${ATTIC_TOKEN+x} ]; then
echo "\$ATTIC_TOKEN is unset, skipping uploading to the binary cache"
return
fi
nix run --inputs-from . attic#default -- \
login "$ATTIC_SERVER" "$ATTIC_ENDPOINT" "$ATTIC_TOKEN"
# Upload them to Attic. It seems to insist on newlines to separate the
# paths.
(
IFS=$'\n'
nix run --inputs-from . attic#default -- \
push --stdin --no-closure "$ATTIC_SERVER:$ATTIC_CACHE" \
<<< "${upload_paths[*]}"
)
}
# Build and cache things needed for CI
ci() {
installables=(
--inputs-from .
# Keep sorted
".#devShells.x86_64-linux.default"
attic#default
nixpkgs#direnv
nixpkgs#jq
)
just "${installables[@]}"
}
# Build and cache all the package outputs
packages() {
readarray -t installables < <(
nix flake show --json 2> /dev/null |
nix run --inputs-from . nixpkgs#jq -- \
-r \
'.packages."x86_64-linux" | keys | map(".#" + .) | .[]'
)
just "${installables[@]}"
}
pushd "$(git rev-parse --show-toplevel)" > /dev/null
"$@"
popd > /dev/null
# Push the target installable and its build dependencies
nix run --inputs-from . attic -- \
push \
conduit \
"$(nix path-info "$INSTALLABLE" --derivation)" \
"$(nix path-info "$INSTALLABLE")"
else
echo "\$ATTIC_TOKEN is unset, skipping uploading to the binary cache"
fi

View file

@ -1,12 +1,18 @@
[book]
title = "Grapevine"
title = "Conduit"
description = "Conduit is a simple, fast and reliable chat server for the Matrix protocol"
language = "en"
multilingual = false
src = "book"
src = "docs"
[build]
build-dir = "target/book"
build-dir = "public"
create-missing = true
[output.html]
git-repository-url = "https://gitlab.com/famedly/conduit"
edit-url-template = "https://gitlab.com/famedly/conduit/-/edit/next/{path}"
git-repository-icon = "fa-git-square"
git-repository-url = "https://gitlab.computer.surgery/matrix/grapevine"
[output.html.search]
limit-results = 15

View file

@ -1,12 +0,0 @@
# Summary
* [Introduction](./introduction.md)
* [Code of conduct](./code-of-conduct.md)
* [Changelog](./changelog.md)
* [Installing](./installing.md)
* [Supported targets](./installing/supported-targets.md)
* [Migrating to/from Conduit](./installing/migrating-conduit.md)
* [Migrating to/from Conduwuit](./installing/migrating-conduwuit.md)
* [Contributing](./contributing.md)
* [Coordinated vulnerability disclosure](./contributing/security.md)
* [Style guide](./contributing/style-guide.md)

View file

@ -1,340 +0,0 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog][keep-a-changelog], and this project
adheres to [Semantic Versioning][semver].
[keep-a-changelog]: https://keepachangelog.com/en/1.0.0/
[semver]: https://semver.org/spec/v2.0.0.html
<!--
Changelog sections must appear in the following order if they appear for a
particular version so that attention can be drawn to the important parts:
1. Security
2. Removed
3. Deprecated
4. Changed
5. Fixed
6. Added
Entries within each section should be sorted by merge order. If multiple changes
result in a single entry, choose the merge order of the first or last change.
-->
## Unreleased
<!-- TODO: Change "will be" to "is" on release -->
This will be the first release of Grapevine since it was forked from Conduit
0.7.0.
### Security
1. Prevent XSS via user-uploaded media.
([!8](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/8))
2. Switch from incorrect, hand-rolled `X-Matrix` `Authorization` parser to the
much better implementation provided by Ruma.
([!31](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/31))
* This is not practically exploitable to our knowledge, but this change does
reduce risk.
3. Switch to a more trustworthy password hashing library.
([!29](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/29))
* This is not practically exploitable to our knowledge, but this change does
reduce risk.
4. Don't return redacted events from the search endpoint.
([!41 (f74043d)](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/41/diffs?commit_id=f74043df9aa59b406b5086c2e9fa2791a31aa41b),
[!41 (83cdc9c)](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/41/diffs?commit_id=83cdc9c708cd7b50fe1ab40ea6a68dcf252c190b))
5. Prevent impersonation in EDUs.
([!41 (da99b07)](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/41/diffs?commit_id=da99b0706e683a2d347768efe5b50676abdf7b44))
* `m.signing_key_update` was not affected by this bug.
6. Verify PDUs and transactions against the temporally-correct signing keys.
([!41 (9087da9)](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/41/diffs?commit_id=9087da91db8585f34d026a48ba8fdf64865ba14d))
7. Only allow the admin bot to change the room ID that the admin room alias
points to.
([!42](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/42))
### Removed
1. Remove update checker.
([17a0b34](https://gitlab.computer.surgery/matrix/grapevine/-/commit/17a0b3430934fbb8370066ee9dc3506102c5b3f6))
2. Remove optional automatic display name emoji for newly registered users.
([cddf699](https://gitlab.computer.surgery/matrix/grapevine/-/commit/cddf6991f280008b5af5acfab6a9719bb0cfb7f1))
3. Remove admin room welcome message on first startup.
([c9945f6](https://gitlab.computer.surgery/matrix/grapevine/-/commit/c9945f6bbac6e22af6cf955cfa99826d4b04fe8c))
4. Remove incomplete presence implementation.
([f27941d](https://gitlab.computer.surgery/matrix/grapevine/-/commit/f27941d5108acda250921c6a58499a46568fd030))
5. Remove Debian packaging.
([d41f0fb](https://gitlab.computer.surgery/matrix/grapevine/-/commit/d41f0fbf72dae6562358173f425d23bb0e174ca2))
6. Remove Docker packaging.
([!48](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/48))
7. **BREAKING:** Remove unstable room versions.
([!59](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/59))
8. Remove `memory-usage`, `clear-database-caches`, and `clear-service-caches`
admin commands.
([!123](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/123))
* The `memory-usage` command wasn't particularly useful since it can't
actually give you an accurate value in bytes and isn't supported on all
database backends.
* The latter two commands had poor UX and didn't have any noticeable effect on
memory consumption.
9. **BREAKING:** Remove the `global.conduit_cache_capacity_modifier` and
`global.pdu_cache_capacity` configuration options.
([!124](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/124))
* Instead, it is now possible to configure each cache capacity individually.
10. Remove jemalloc support.
([!93](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/193))
11. Remove support for MSC3575 (sliding sync), which has been closed.
([!198](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/198))
### Changed
1. **BREAKING:** Rename Conduit to Grapevine.
([360e020](https://gitlab.computer.surgery/matrix/grapevine/-/commit/360e020b644bd012ed438708b661a25fbd124f68))
* The `CONDUIT_VERSION_EXTRA` build-time environment variable has been
renamed to `GRAPEVINE_VERSION_EXTRA`. This change only affects distribution
packagers or non-Nix users who are building from source. If you fall into
one of those categories *and* were explicitly setting this environment
variable, make sure to change its name before building Grapevine.
2. **BREAKING:** Change the default port from 8000 to 6167.
([f205280](https://gitlab.computer.surgery/matrix/grapevine/-/commit/f2052805201f0685d850592b1c96f4861c58fb22))
* If you relied on the default port being 8000, either update your other
configuration to use the new port, or explicitly configure Grapevine's port
to 8000.
3. Improve tracing spans and events.
([!11 (a275db3)](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/11/diffs?commit_id=a275db3847b8d5aaa0c651a686c19cfbf9fdb8b5)
(merged as [5172f66](https://gitlab.computer.surgery/matrix/grapevine/-/commit/5172f66c1a90e0e97b67be2897ae59fbc00208a4)),
[!11 (f556fce)](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/11/diffs?commit_id=f556fce73eb7beec2ed7b1781df0acdf47920d9c)
(merged as [ac42e0b](https://gitlab.computer.surgery/matrix/grapevine/-/commit/ac42e0bfff6af8677636a3dc1a56701a3255071d)),
[!18](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/18),
[!26](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/26),
[!50](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/50),
[!52](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/52),
[!54](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/54),
[!56](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/56),
[!69](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/69),
[!102](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/102),
[!127](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/127),
[!141](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/141))
4. Stop returning unnecessary member counts from `/_matrix/client/{r0,v3}/sync`.
([!12](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/12))
5. **BREAKING:** Allow federation by default.
([!24](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/24))
* If you relied on federation being disabled by default, make sure to
explicitly disable it before upgrading.
6. **BREAKING:** Remove the `[global]` section from the configuration file.
([!38](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/38))
* Details on how to migrate can be found in the merge request's description.
7. **BREAKING:** Allow specifying multiple transport listeners in the
configuration file.
([!39](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/39))
* Details on how to migrate can be found in the merge request's description.
8. Increase default log level so that span information is included.
([!50](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/50))
9. **BREAKING:** Reorganize config into sections.
([!49](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/49))
* Details on how to migrate can be found in the merge request's description.
10. Try to generate thumbnails for remote media ourselves if the federation
thumbnail request fails.
([!58](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/58))
11. **BREAKING:** Disable unauthenticated access to media by default. Use
`media.allow_unauthenticated_access` to configure this behavior.
([!103](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/103),
[!140](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/140))
12. **BREAKING:** Split CLI into multiple subcommands. The CLI invocation to run
the server is now behind the `serve` command, so `grapevine --config ...`
becomes `grapevine serve --config ...`.
([!108](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/108))
13. **BREAKING:** The path to media files is now specified separately from the
database path.
([!140](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/140),
[!170](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/170))
14. Use trust-dns for all DNS queries, instead of only for SRV records and SRV
record targets in server discovery.
([!156](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/156))
### Fixed
1. Fix questionable numeric conversions.
([71c48f6](https://gitlab.computer.surgery/matrix/grapevine/-/commit/71c48f66c4922813c2dc30b7b875200e06ce4b75))
2. Stop sending no-longer-valid cached responses from the
`/_matrix/client/{r0,v3}/sync` endpoints.
([!7](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/7))
3. Stop returning extra E2EE device updates from `/_matrix/client/{r0,v3}/sync`
as that violates the specification.
([!12](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/12))
4. Make certain membership state transitions work correctly again.
([!16](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/16))
* For example, it was previously impossible to unban users from rooms.
5. Ensure that `tracing-flame` flushes all its data before the process exits.
([!20 (263edcc)](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/20/diffs?commit_id=263edcc8a127ad2a541a3bb6ad35a8a459ea5616))
6. Reduce the likelihood of locking up the async runtime.
([!19](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/19))
7. Fix search results not including subsequent pages in certain situations.
([!35 (0cdf032)](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/35/diffs?commit_id=0cdf03288ab8fa363c313bd929c8b5183d14ab77))
8. Fix search results missing events in subsequent pages in certain situations.
([!35 (3551a6e)](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/35/diffs?commit_id=3551a6ef7a29219b9b30f50a7e8c92b92debcdcf))
9. Only process admin commands if the admin bot is in the admin room.
([!43](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/43))
10. Fix bug where invalid account data from a client could prevent a user from
joining any upgraded rooms and brick rooms that affected users attempted to
upgrade.
([!53](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/53))
11. Fix bug where unexpected keys were deleted from `m.direct` account data
events when joining an upgraded room.
([!53](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/53))
12. Fixed appservice users not receiving federated invites if the local server
isn't already resident in the room.
([!80](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/80))
13. Fix bug where, if a server has multiple public keys, only one would be fetched.
([!78](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/78))
14. Fix bug where expired keys may not be re-fetched in some scenarios.
([!78](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/78))
15. Fix bug where signing keys would not be fetched when joining a room if we
hadn't previously seen any signing keys from that server.
([!87](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/87))
16. Fixed bug
([#48](https://gitlab.computer.surgery/matrix/grapevine/-/issues/48))
that caused us to attempt to fetch our own signing keys from ourselves over
federation, and fail ("Won't send federation request to ourselves").
([!96](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/96))
17. Fixed incoming HTTP/2 requests failing federation signature check.
([!104](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/104))
18. Return 403 instead of 500 when joins to a local-only room are denied.
Consequently fixes Heisenbridge being unable to join puppeted users to its
rooms ([#85](https://gitlab.computer.surgery/matrix/grapevine/-/issues/85)).
([!127](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/127))
19. Fix handling of v11 rooms with `m.room.create` event content that passes
the authorization rules but doesn't match other parts of the spec.
([!139](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/139))
20. Fix tiebreaking comparisons between events during state resolution. This
will reduce the rate at which servers disagree about the state of rooms.
([!141](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/141))
21. Fix bug where the backoff state for remote device key queries was not reset
after a successful request, causing an increasing rate of key query failures
over time until a server restart.
([!149](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/149))
22. Fix bug where remote key queries that were skipped because the target server
was in backoff would increment the backoff delay further, leading to a
positive feedback loop.
([!149](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/149))
23. Return 504 M_NOT_YET_UPLOADED instead of 500 M_UNKNOWN when a media file is
present in the database but the contents are missing in the filesystem.
Removing media from the filesystem was the only way to delete media before
[!99](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/99),
so this situation is common.
([!55](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/55))
([!153](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/153))
24. Return 400 M_BAD_ALIAS from
[PUT /_matrix/client/v3/rooms/{roomId}/state/{eventType}/{stateKey}](https://spec.matrix.org/latest/client-server-api/#put_matrixclientv3roomsroomidstateeventtypestatekey)
instead of 400 M_FORBIDDEN when trying to set a canonical alias that does
not exist.
([!158](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/158))
25. Validate schema of new `m.room.canonical_alias` event sent by clients,
rather than silently allowing any contents if the event can't be parsed.
([!158](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/158))
26. Only validate canonical aliases that are new, rather than
revalidating every alias. This makes it possible to add/remove aliases when
some of the existing aliases cannot be validated.
([!158](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/158))
27. Fix read receipts not being sent over federation (or only arbitrarily late).
([!162](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/162))
28. Fix bug where ban reasons would be ignored when the banned user already had
a member event in the room.
([!185](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/185))
29. Stop stripping unknown properties from redaction events before sending them
to clients.
([!191](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/191))
### Added
1. Add various conveniences for users of the Nix package.
([51f9650](https://gitlab.computer.surgery/matrix/grapevine/-/commit/51f9650ca7bc9378690d331192c85fea3c151b58),
[bbb1a6f](https://gitlab.computer.surgery/matrix/grapevine/-/commit/bbb1a6fea45b16e8d4f94c1afbf7fa22c9281f37))
2. Add a NixOS module.
([33e7a46](https://gitlab.computer.surgery/matrix/grapevine/-/commit/33e7a46b5385ea9035c9d13c6775d63e5626a4c7))
3. Add a Conduit compat mode.
([a25f2ec](https://gitlab.computer.surgery/matrix/grapevine/-/commit/a25f2ec95045c5620c98eead88197a0bf13e6bb3))
* **BREAKING:** If you're migrating from Conduit, this option must be enabled
or else your homeserver will refuse to start.
4. Include `GRAPEVINE_VERSION_EXTRA` information in the
`/_matrix/federation/v1/version` endpoint.
([509b70b](https://gitlab.computer.surgery/matrix/grapevine/-/commit/509b70bd827fec23b88e223b57e0df3b42cede34))
5. Allow multiple tracing subscribers to be active at once.
([!20 (7a154f74)](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/20/diffs?commit_id=7a154f74166c1309ca5752149e02bbe44cd91431))
6. Allow configuring the filter for `tracing-flame`.
([!20 (507de06)](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/20/diffs?commit_id=507de063f53f52e0cf8e2c1a67215a5ad87bb35a))
7. Collect HTTP response time metrics via OpenTelemetry and optionally expose
them as Prometheus metrics. This functionality is disabled by default.
([!22](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/22))
8. Collect metrics for lookup results (e.g. cache hits/misses).
([!15](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/15),
[!36](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/36))
9. Add configuration options for controlling the log format and colors.
([!46](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/46))
10. Recognize the `!admin` prefix to invoke admin commands.
([!45](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/45))
11. Add the `tracing-filter` admin command to view and change log/metrics/flame
filters dynamically at runtime.
([!49](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/49),
[!164](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/164))
12. Add more configuration options.
([!49](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/49))
* `observability.traces.filter`: The `tracing` filter to use for
OpenTelemetry traces.
* `observability.traces.endpoint`: Where OpenTelemetry should send traces.
* `observability.flame.filter`: The `tracing` filter for `tracing-flame`.
* `observability.flame.filename`: Where `tracing-flame` will write its
output.
* `observability.logs.timestamp`: Whether timestamps should be included in
the logs.
13. Support building nix packages without IFD
([!73](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/73))
14. Report local users getting banned in the server logs and admin room.
([!65](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/65),
[!84](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/84))
15. Added support for Authenticated Media ([MSC3916](https://github.com/matrix-org/matrix-spec-proposals/pull/3916)).
([!58](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/58),
[!111](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/111))
16. **BREAKING:** Added support for configuring and serving
`/.well-known/matrix/...` data.
([!90](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/90),
[!94](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/94))
* The `server_discovery.client.base_url` option is now required.
17. Added support for configuring old verify/signing keys in config (`federation.old_verify_keys`).
([!96](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/96))
18. Added admin commands to delete media.
([!99](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/99),
[!102](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/102),
[!148](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/148))
19. Allow configuring the served API components per listener.
([!109](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/109))
20. Include the [`traceresponse` header](https://w3c.github.io/trace-context/#traceresponse-header)
if OpenTelemetry Tracing is in use.
([!112](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/112))
21. Sending SIGHUP to the grapevine process now reloads TLS certificates from
disk.
([!97](https://gitlab.computer.surgery/matrix/grapevine-fork/-/merge_requests/97))
22. Added a federation self-test, performed automatically on startup.
([!106](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/106))
23. Added support for HAProxy [proxy protocol](http://www.haproxy.org/download/3.0/doc/proxy-protocol.txt)
listeners.
([!97](https://gitlab.computer.surgery/matrix/grapevine-fork/-/merge_requests/97))
24. Add a `check-config` CLI subcommand to check whether the configuration file
is valid.
([!121](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/121))
25. Add configuration options to tune the value of each cache individually.
([!124](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/124))
26. Allow adding canonical aliases from remote servers.
([!158](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/158))
27. Grapevine now sends a User-Agent header on outbound requests.
([!189](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/189))
28. Added the ability to listen on Unix sockets.
([!187](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/187))
29. Added the ability to allow invalid TLS certificates.
([!203](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/203))

View file

@ -1,12 +0,0 @@
# Code of conduct
We follow the [Rust Code of Conduct][rust-coc] with some extra points:
* In the absence of evidence to suggest otherwise, assume good faith when
engaging with others
* Moderation actions may be taken for behavior observed outside of
project-specific spaces
* We have limited patience, so violations may skip the warning and directly
result in a ban
[rust-coc]: https://www.rust-lang.org/policies/code-of-conduct

View file

@ -1,15 +0,0 @@
# Contributing
Instructions for getting GitLab access can be found on the [sign-in][sign-in]
page.
GitLab access is primarily useful if you'd like to open issues, engage in
discussions on issues or merge requests, or submit your own merge requests.
Note that if the sign-up process is too much trouble and you'd just
like to report an issue, feel free to report it in the Matrix room at
[#grapevine:computer.surgery][room]; someone with GitLab access can open an
issue on your behalf.
[room]: https://matrix.to/#/#grapevine:computer.surgery
[sign-in]: https://gitlab.computer.surgery/users/sign_in

View file

@ -1,15 +0,0 @@
# Coordinated vulnerability disclosure
If you find a security vulnerability in Grapevine, please privately report it to
the Grapevine maintainers in one of the following ways:
* Open a GitLab issue that's marked as confidential
* Create a private, invite-only, E2EE Matrix room and invite the following
users:
* `@charles:computer.surgery`
* `@olivia:computer.surgery`
* `@xiretza:xiretza.xyz`
If the maintainers determine that the vulnerability is shared with Conduit or
other forks, we'll work with their teams to ensure that all affected projects
can release a fix at the same time.

View file

@ -1,148 +0,0 @@
# Style guide
It is recommended that contributors follow this guide to minimize nitpicking on
their merge requests.
However, this guide is not set in stone. It is open to changes as new patterns
emerge, requirements change, compelling arguments are made, and so on. The goal
is to document the existing style so it can be applied consistently, not to
ensure the style never changes.
## Merge requests
When updating a merge request branch, use `git rebase`; do not create merge
commits to merge other branches into a merge request branch.
**Why?** This keeps the history simpler, and lacking merge commits makes it
easier to revert any individual commit.
## Commit messages
[Here's a good article][git-commit] on how to write good commit messages in
general.
Specifically for this project:
* Capitalizing the first letter is not required.
* It is recommended to avoid "conventional commits", as they take away from the
very limited subject line length, and will not be processed in an automated
fashion anyway.
**Why?** The linked article explains why this is good practice.
[git-commit]: https://cbea.ms/git-commit/
## Structuring commits
Try to structure each commit so that it falls into one of the following
categories:
* Refactoring, with no behavior change.
* Changing behavior, with no refactoring.
* Removing behavior, with no refactoring.
* Adding behavior, with no refactoring.
* Rewriting something completely. It is rare that these kinds of commits are
warranted.
If you find yourself wanting to use the word "and" in the commit's subject line,
it should probably be broken into multiple commits.
During code review, it's common to get feedback requesting changes to your
commits. To apply this feedback, do not make and push a new commit containing
the requested change. Instead, include the requested change in the commit of
yours that gave rise to the suggestion. If you are unfamiliar with rewriting
history in git, [this website][git-rebase] is a great tutorial.
**Why?** Small, targeted, and well-explained commits make it easier for
reviewers to verify that a change has its intended effect. Or, for someone
running `git bisect` to find a more granular answer to why their test began
failing.
[git-rebase]: https://git-rebase.io/
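For instance, here is one possible workflow for folding review feedback into the
commit that prompted it (a sketch; the commit hash and base branch are
illustrative):

```bash
# Stage the requested change, then record it as a fixup of the
# offending commit
git add -p
git commit --fixup=abc1234

# Rewrite history so the fixup is squashed into abc1234
git rebase --interactive --autosquash origin/main
```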
## `mod`/`use` order
`mod` and `use` statements should appear in the following order, separated by
a blank line:
1. `use` statements referring to `std`, `alloc`, or `core`, if any.
2. `use` statements referring to other crates, if any.
3. `use` statements referring to `super` or `crate`, if any.
4. Macro definitions that need to be accessible from child modules, if any.
5. `mod` statements, if any.
6. `use` statements referring to modules declared by the above `mod` statements,
if any.
`rust-analyzer` and `rustfmt` automate most of this except points 4 and 5.
**Why?** Consistency is good.
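As an illustration, a module following this order might look like the sketch
below (all module, type, and macro names are hypothetical):

```rust,ignore
use std::collections::HashMap;

use ruma::OwnedUserId;

use crate::{Error, Result};

// Defined before the `mod` statements so child modules can use it
macro_rules! shared_macro {
    () => {};
}

mod appservice;
mod globals;

use appservice::Appservice;
use globals::Globals;
```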
## Testing
When writing tests, be sure to keep the contents of [this article][cargo-test]
in mind. Especially, keeping Cargo unit tests in a dedicated tests file
(mentioned towards the end of the article).
**Why?** The linked article explains why this is good practice.
[cargo-test]: https://matklad.github.io/2021/02/27/delete-cargo-integration-tests.html
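In practice, this means declaring the test module in the main file but keeping
its body in a dedicated file, as in this sketch (file and function names are
hypothetical):

```rust,ignore
// src/foo.rs
pub fn frobnicate(input: &str) -> String {
    input.trim().to_owned()
}

// Declare the unit tests here, but keep their bodies in the dedicated
// file `src/foo/tests.rs`
#[cfg(test)]
mod tests;
```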
## Tracing
`tracing` events should:
1. Start with a capital letter (when applicable).
`tracing` events should not:
1. End with punctuation.
2. Interpolate values into the event's message.
* Instead, add those values as structured fields.
**Why?** Consistency is good. Also, interpolating values into the event message
essentially defeats the point of structured logging.
### Examples
#### 1
```rust,ignore
// This does not conform because it does not start with a capital letter.
info!("started pentametric fan");
// Do this instead:
info!("Started pentametric fan");
```
#### 2
```rust,ignore
// This does not conform because it ends with punctuation.
info!("Started pentametric fan.");
// Do this instead:
info!("Started pentametric fan");
```
#### 3
```rust,ignore
// This does not conform because it interpolates values into the event's
// message.
warn!("Noticed {} discombobulated waneshafts", count);
// Do this instead:
warn!(count, "Noticed discombobulated waneshafts");
```
## Services
Services are abstraction units that live inside the `src/service` directory.
Calling service constructors must not cause side effects, with a few exceptions:
* Database reads.
* Local filesystem reads.
**Why?** This restriction makes it possible to implement subcommands that run
"offline" that reuse service code.

View file

@ -1,7 +0,0 @@
# Installing
This chapter will explain how to start running a Grapevine instance for the
first time.
**Note:** Pre-built binaries can be found in the [**Supported
targets**](./installing/supported-targets.md) subchapter.

View file

@ -1,118 +0,0 @@
# Migrating to/from Conduit
Before migrating a Conduit instance to Grapevine, make sure to read through
all of the breaking changes listed in [the changelog](../changelog.md).
In order to migrate an existing Conduit instance to/from Grapevine, the
Grapevine config must include `conduit_compat = true`. This parameter cannot
currently be modified after creating the database for the first time, so make
sure to set it when creating a fresh Grapevine instance that you may want to
migrate to a different implementation in the future.
## Config
Grapevine includes several breaking changes to the config schema. We don't
currently have docs on how to migrate an existing config. All breaking config
changes are mentioned in [the changelog](../changelog.md), so the best current
option is to read through those. Feel free to ask for config migration help in
[#grapevine:computer.surgery][room] if something is unclear.
We plan to add [a config migration tool][config-migration-issue] to support
automatically migrating existing configs to the new schema.
[room]: https://matrix.to/#/#grapevine:computer.surgery
[config-migration-issue]: https://gitlab.computer.surgery/matrix/grapevine/-/issues/38
## Filesystem
Grapevine requires database data and media data to live in **separate**,
**non-nested** directories, which are configurable. Here is a typical example,
starting with the filesystem structure:
```text
/var/lib/grapevine
+ database/
| + database-file-1
| + ...
| + database-file-n
+ media/
+ media-file-1
+ ...
+ media-file-n
```
And here is the matching configuration:
```toml
[database]
path = "/var/lib/grapevine/database"
[media.backend]
type = "filesystem"
path = "/var/lib/grapevine/media"
```
On the other hand, Conduit's filesystem layout looks like this:
```text
/var/lib/conduit
+ media/
| + media-file-1
| + ...
| + media-file-n
+ database-file-1
+ ...
+ database-file-n
```
This layout **nests** the media directory inside the database directory.
Grapevine will reject it, so the filesystem layout must be changed before
starting Grapevine; otherwise, Grapevine will create a fresh database instead
of using the existing one.
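As a sketch, migrating from Conduit's layout to Grapevine's might look like
this, assuming the data lives at the paths shown above (stop the server and
take a backup first):

```bash
# Move the media directory out of the database directory first,
# then rename what remains to become the database directory
mkdir /var/lib/grapevine
mv /var/lib/conduit/media /var/lib/grapevine/media
mv /var/lib/conduit /var/lib/grapevine/database
```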
## Database
Grapevine is currently compatible with the Conduit 0.7.0 database format. It is
still possible to migrate to or from some newer Conduit versions, but it may
require manual intervention or break some functionality.
We plan to add [a migration tool][db-compatibility-mr] to support cleanly
migrating to or from Conduit versions we are not internally compatible with.
| Is migrating from | to | workable? |
|-|-|-|
| Conduit <=0.8.0 | Grapevine | Yes |
| Conduit 0.9.0 | Grapevine | [Yes, with caveats](#conduit-090-to-grapevine) |
| Grapevine | Conduit 0.7.0 | Yes |
| Grapevine | Conduit 0.8.0 | [Yes, with caveats](#grapevine-to-conduit-080-or-090) |
| Grapevine | Conduit 0.9.0 | [Yes, with caveats](#grapevine-to-conduit-080-or-090) |
[db-compatibility-mr]: https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/85
### Conduit 0.9.0 to Grapevine
Conduit 0.9.0 includes [a database migration][conduit-db-16-migration] that
modifies data that Grapevine doesn't read. Grapevine does not currently
recognize the new database schema version, and will fail to start against
a Conduit 0.9.0 database. Grapevine can start and run without issues if the
version recorded in the database is rolled back from 16 to 13. It is possible to
do this by editing the database manually, or by modifying Grapevine to change
the version. [This patch][conduit-db-16-patch] is an example of the latter
approach.
[conduit-db-16-migration]: https://gitlab.com/famedly/conduit/-/blob/f8d7ef04e664580e882bac852877b68e7bd3ab1e/src/database/mod.rs#L945
[conduit-db-16-patch]: https://gitlab.computer.surgery/matrix/grapevine/-/commit/fdaa30f0d670c6f04f4e6be5d193f9146d179d95
### Grapevine to Conduit 0.8.0 or 0.9.0
Conduit 0.8.0 added [a new database table][alias_userid-commit] to track which
users created each room alias. Grapevine does not write to this table, so it is
not possible to delete aliases created in Grapevine through the normal
client-server API after migrating to Conduit 0.8.0. It is possible to delete
aliases with the `remove-alias` admin command. Note that this issue also applies
to migrations from Conduit <0.8.0 to Conduit 0.8.0.
There are no additional known issues when migrating to Conduit 0.9.0.
[alias_userid-commit]: https://gitlab.com/famedly/conduit/-/commit/144d548ef739324ca97db12e8cada60ca3e43e09

View file

@ -1,6 +0,0 @@
# Migrating to/from Conduwuit
Current versions of Conduwuit are explicitly incompatible with the Conduit/Grapevine
database format. Some older versions have been migrated to Grapevine
successfully, but we haven't evaluated which versions it is safe to migrate
from yet. Try this at your own risk, and *definitely* take a backup first.

View file

@ -1,37 +0,0 @@
# Supported targets
<!-- Keep this table's rows sorted -->
<!-- markdownlint-disable-next-line MD013 MD033 -->
| Architecture | Vendor | OS | libc | Linkage | Tier | Availability[^1]<sup>, </sup>[^2] |
|-|-|-|-|-|-|-|
| aarch64 | unknown | linux | musl | static | 2 | Nix, [Download](https://gitlab.computer.surgery/api/v4/projects/matrix%2Fgrapevine/jobs/artifacts/main/raw/grapevine-static-aarch64-unknown-linux-musl?job=artifacts) |
| x86_64 | unknown | linux | glibc | dynamic | 1 | Nix |
| x86_64 | unknown | linux | musl | static | 2 | Nix, [Download](https://gitlab.computer.surgery/api/v4/projects/matrix%2Fgrapevine/jobs/artifacts/main/raw/grapevine-static-x86_64-unknown-linux-musl?job=artifacts) |
[^1]: All download links refer to the latest build of the `main` branch.
[^2]: All targets can theoretically also be built from source without Nix.
However, this may require spending several hours debugging build systems.
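For example, the static x86_64 binary can be fetched and made executable like
so (URL copied from the table above):

```bash
curl -L -o grapevine \
    'https://gitlab.computer.surgery/api/v4/projects/matrix%2Fgrapevine/jobs/artifacts/main/raw/grapevine-static-x86_64-unknown-linux-musl?job=artifacts'
chmod +x grapevine
```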
## Target tiers
The "Tier" column for each target indicates the level of support that target
has. Below is an explanation of what each tier means.
### Tier 1
Tier 1 targets are guaranteed to:
* Build
* Pass the test suite
### Tier 2
Tier 2 targets are guaranteed to:
* Build
## Unsupported targets
Targets that don't appear in the table at the top of this page are unsupported.
At any given time, such targets may or may not build, and may or may not pass
the test suite.

View file

@ -1,110 +0,0 @@
# Introduction
Grapevine is a [Matrix][matrix] homeserver that was originally forked from
[Conduit 0.7.0][conduit].
[matrix]: https://matrix.org/
[conduit]: https://gitlab.com/famedly/conduit/-/tree/v0.7.0?ref_type=tags
## Chat with us
Currently, the Matrix room at [#grapevine:computer.surgery][room] serves
multiple purposes:
* General discussion about the project, such as answering questions about it
* Reporting issues with Grapevine, if getting GitLab access is too much trouble
for you
* Providing support to users running Grapevine
* Discussing the development of Grapevine
If you'd like to engage in or observe any of those things, please join!
[room]: https://matrix.to/#/#grapevine:computer.surgery
## Can I use it?
Theoretically yes, but it's not ready for general use yet, because:
* [There aren't any releases][releases-issue].
* [There isn't very much user-facing documentation][docs-issue].
* There have been several additions and breaking changes to the configuration
file format that haven't been documented in detail. This means you'll need to
read the source code to figure out what all the options are and what they do.
If these issues don't scare you away, go for it! (And if you use NixOS, [here's
an example][nixos-example].)
[docs-issue]: https://gitlab.computer.surgery/matrix/grapevine/-/issues/21
[releases-issue]: https://gitlab.computer.surgery/matrix/grapevine/-/issues/18
[nixos-example]: https://gitlab.computer.surgery/charles/servy-fleet/-/blob/main/config/grapevine/default.nix
## Expectations management
This project is run and maintained entirely by volunteers who are doing their
best. Additionally, due to our goals, the development of new features may be
slower than alternatives. We find this to be an acceptable tradeoff considering
the importance of the reliability of a project like this.
## Goals
Our goal is to provide a robust and reliable Matrix homeserver implementation.
In order to accomplish this, we aim to do the following:
* Optimize for maintainability
* Implement automated testing to ensure correctness
* Improve instrumentation to provide real-world data to aid decision-making
## Non-goals
We also have some things we specifically want to avoid as we feel they inhibit
our ability to accomplish our goals:
* macOS or Windows support
* These operating systems are very uncommon in the hobbyist server space, and
we feel our effort is better spent elsewhere.
* Docker support
* Docker tends to generate a high volume of support requests that are solely
due to Docker itself or how users are using Docker. In an attempt to mitigate
this, we will not provide first-party Docker images. Instead, we'd recommend
avoiding Docker and either using our pre-built statically-linked binaries
or building from source. However, if your deployment mechanism *requires*
Docker, it should be straightforward to build your own Docker image.
* Configuration via environment variables
* Environment variables restrict the options for structuring configuration and
support for them would increase the maintenance burden. If your deployment
mechanism requires this, consider using an external tool like
[`envsubst`][envsubst].
* Configuration compatibility with Conduit
* To provide a secure and ergonomic configuration experience, breaking changes
are required. However, [we do intend to provide a migration tool to ease
migration][migration-tool].
* Perfect database compatibility with Conduit
* [This issue tracks the database compatibility status][db-compat]. In the
long run, it's inevitable that changes will be made to Conduit that we won't
want to pull in, or that we need to make changes that Conduit won't want to
pull in.
[envsubst]: https://github.com/a8m/envsubst
[migration-tool]: https://gitlab.computer.surgery/matrix/grapevine/-/issues/38
[db-compat]: https://gitlab.computer.surgery/matrix/grapevine/-/issues/17
## Project management
The project's current maintainers[^1] are:
| Matrix username | GitLab username |
|-|-|
| `@charles:computer.surgery` | `charles` |
| `@olivia:computer.surgery` | `olivia` |
| `@xiretza:xiretza.xyz` | `Lambda` |
We would like to expand this list in the future as social trust is built and
technical competence is demonstrated by other contributors.
We require at least 1 approving code review from a maintainer[^2] before changes
can be merged. This number may increase in the future as the list of maintainers
grows.
[^1]: A "maintainer" is someone who has the ability to close issues opened by
someone else and merge changes.
[^2]: A maintainer approving their own change doesn't count.

View file

@ -1 +0,0 @@
# Migration to/from Conduit

View file

@ -1,4 +0,0 @@
doc-valid-idents = [
"SemVer",
"..",
]

45
complement/Dockerfile Normal file
View file

@ -0,0 +1,45 @@
FROM rust:1.75.0
WORKDIR /workdir
RUN apt-get update && apt-get install -y --no-install-recommends \
libclang-dev
COPY Cargo.toml Cargo.toml
COPY Cargo.lock Cargo.lock
COPY src src
RUN cargo build --release \
&& mv target/release/conduit conduit \
&& rm -rf target
# Install caddy
RUN apt-get update \
&& apt-get install -y \
debian-keyring \
debian-archive-keyring \
apt-transport-https \
curl \
&& curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/gpg.key' \
| gpg --dearmor -o /usr/share/keyrings/caddy-testing-archive-keyring.gpg \
&& curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/debian.deb.txt' \
| tee /etc/apt/sources.list.d/caddy-testing.list \
&& apt-get update \
&& apt-get install -y caddy
COPY conduit-example.toml conduit.toml
COPY complement/caddy.json caddy.json
ENV SERVER_NAME=localhost
ENV CONDUIT_CONFIG=/workdir/conduit.toml
RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml
RUN echo "log = \"warn,_=off,sled=off\"" >> conduit.toml
RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml
EXPOSE 8008 8448
CMD uname -a && \
sed -i "s/#server_name = \"your.server.name\"/server_name = \"${SERVER_NAME}\"/g" conduit.toml && \
sed -i "s/your.server.name/${SERVER_NAME}/g" caddy.json && \
caddy start --config caddy.json > /dev/null && \
/workdir/conduit

11
complement/README.md Normal file
View file

@ -0,0 +1,11 @@
# Complement
## What's that?
Have a look at [its repository](https://github.com/matrix-org/complement).
## How do I use it with Conduit?
The script at [`../bin/complement`](../bin/complement) has automation for this.
It takes a few command-line arguments; you can read the script to find out what
those are.
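For example (the paths and the test name below are illustrative):

```bash
# 1: path to a checkout of Complement's source code
# 2: file to collect raw `go test -json` output
# 3: file to collect the post-processed results
bin/complement ~/src/complement complement-logs.jsonl complement-results.jsonl
```

Each line of the results file is a compact JSON object such as
`{"Action":"pass","Test":"TestLogin"}`, which makes two runs easy to diff.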

72
complement/caddy.json Normal file
View file

@ -0,0 +1,72 @@
{
"logging": {
"logs": {
"default": {
"level": "WARN"
}
}
},
"apps": {
"http": {
"https_port": 8448,
"servers": {
"srv0": {
"listen": [":8448"],
"routes": [{
"match": [{
"host": ["your.server.name"]
}],
"handle": [{
"handler": "subroute",
"routes": [{
"handle": [{
"handler": "reverse_proxy",
"upstreams": [{
"dial": "127.0.0.1:8008"
}]
}]
}]
}],
"terminal": true
}],
"tls_connection_policies": [{
"match": {
"sni": ["your.server.name"]
}
}]
}
}
},
"pki": {
"certificate_authorities": {
"local": {
"name": "Complement CA",
"root": {
"certificate": "/complement/ca/ca.crt",
"private_key": "/complement/ca/ca.key"
},
"intermediate": {
"certificate": "/complement/ca/ca.crt",
"private_key": "/complement/ca/ca.key"
}
}
}
},
"tls": {
"automation": {
"policies": [{
"subjects": ["your.server.name"],
"issuers": [{
"module": "internal"
}],
"on_demand": true
}, {
"issuers": [{
"module": "internal",
"ca": "local"
}]
}]
}
}
}
}

67
conduit-example.toml Normal file
View file

@ -0,0 +1,67 @@
# =============================================================================
# This is the official example config for Conduit.
# If you use it for your server, you will need to adjust it to your own needs.
# At the very least, change the server_name field!
# =============================================================================
[global]
# The server_name is the pretty name of this server. It is used as a suffix for user
# and room ids. Examples: matrix.org, conduit.rs
# The Conduit server needs all /_matrix/ requests to be reachable at
# https://your.server.name/ on port 443 (client-server) and 8448 (federation).
# If that's not possible for you, you can create /.well-known files to redirect
# requests. See
# https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client
# and
# https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server
# for more information
# YOU NEED TO EDIT THIS
#server_name = "your.server.name"
# This is the only directory where Conduit will save its data
database_path = "/var/lib/matrix-conduit/"
database_backend = "rocksdb"
# The port Conduit will be running on. You need to set up a reverse proxy in
# your web server (e.g. apache or nginx), so all requests to /_matrix on port
# 443 and 8448 will be forwarded to the Conduit instance running on this port
# Docker users: Don't change this, you'll need to map an external port to this.
port = 6167
# Max size for uploads
max_request_size = 20_000_000 # in bytes
# Enables registration. If set to false, no users can register on this server.
allow_registration = true
# A static registration token that new users will have to provide when creating
# an account. YOU NEED TO EDIT THIS.
# - Insert a password that users will have to enter on registration
# - Start the line with '#' to remove the condition
registration_token = ""
allow_federation = true
allow_check_for_updates = true
# Enable the display name lightning bolt on registration.
enable_lightning_bolt = true
# Servers listed here will be used to gather public keys of other servers.
# Generally, copying this exactly should be enough. (Currently, Conduit doesn't
# support batched key requests, so this list should only contain Synapse
# servers.)
trusted_servers = ["matrix.org"]
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
# Controls the log verbosity. See also [here][0].
#
# [0]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives
#log = "..."
address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it.

37
debian/README.md vendored Normal file
View file

@ -0,0 +1,37 @@
Conduit for Debian
==================
Installation
------------
For information about downloading, building and deploying the Debian package,
see the "Installing Conduit" section in the Deploying docs.
All following sections up to "Setting up the Reverse Proxy" can be ignored,
because this is handled automatically by the packaging.
Configuration
-------------
When installed, Debconf generates the configuration for the homeserver's
(host)name and the address and port it listens on. This configuration ends up in
`/etc/matrix-conduit/conduit.toml`.
You can tweak more detailed settings by uncommenting and setting the variables
in `/etc/matrix-conduit/conduit.toml`. This involves settings such as the maximum
file size for download/upload, enabling federation, etc.
Running
-------
The package uses the `matrix-conduit.service` systemd unit file to start and
stop Conduit. It loads the configuration file mentioned above to set up the
environment before running the server.
This package assumes by default that Conduit will be placed behind a reverse
proxy such as Apache or nginx. By default, the deployment only listens on
`127.0.0.1` and the free port `6167`, so a client can reach it at
<http://localhost:6167>.
At a later stage, this packaging may also support setting up TLS and running
stand-alone. In that case, however, you will need to set up certificates and
their renewal for it to work properly.

17
debian/config vendored Normal file
View file

@ -0,0 +1,17 @@
#!/bin/sh
set -e
# Source debconf library.
. /usr/share/debconf/confmodule
# Ask for the Matrix homeserver name, address and port.
db_input high matrix-conduit/hostname || true
db_go
db_input low matrix-conduit/address || true
db_go
db_input medium matrix-conduit/port || true
db_go
exit 0

47
debian/matrix-conduit.service vendored Normal file
View file

@ -0,0 +1,47 @@
[Unit]
Description=Conduit Matrix homeserver
After=network.target
[Service]
DynamicUser=yes
User=_matrix-conduit
Group=_matrix-conduit
Type=simple
AmbientCapabilities=
CapabilityBoundingSet=
LockPersonality=yes
MemoryDenyWriteExecute=yes
NoNewPrivileges=yes
ProtectClock=yes
ProtectControlGroups=yes
ProtectHome=yes
ProtectHostname=yes
ProtectKernelLogs=yes
ProtectKernelModules=yes
ProtectKernelTunables=yes
ProtectSystem=strict
PrivateDevices=yes
PrivateMounts=yes
PrivateTmp=yes
PrivateUsers=yes
RemoveIPC=yes
RestrictAddressFamilies=AF_INET AF_INET6
RestrictNamespaces=yes
RestrictRealtime=yes
RestrictSUIDSGID=yes
SystemCallArchitectures=native
SystemCallFilter=@system-service
SystemCallErrorNumber=EPERM
StateDirectory=matrix-conduit
Environment="CONDUIT_CONFIG=/etc/matrix-conduit/conduit.toml"
ExecStart=/usr/sbin/matrix-conduit
Restart=on-failure
RestartSec=10
StartLimitInterval=1m
StartLimitBurst=5
[Install]
WantedBy=multi-user.target

104
debian/postinst vendored Normal file
View file

@ -0,0 +1,104 @@
#!/bin/sh
set -e
. /usr/share/debconf/confmodule
CONDUIT_CONFIG_PATH=/etc/matrix-conduit
CONDUIT_CONFIG_FILE="${CONDUIT_CONFIG_PATH}/conduit.toml"
CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit/
case "$1" in
configure)
# Create the `_matrix-conduit` user if it does not exist yet.
if ! getent passwd _matrix-conduit > /dev/null ; then
echo 'Adding system user for the Conduit Matrix homeserver' 1>&2
adduser --system --group --quiet \
--home "$CONDUIT_DATABASE_PATH" \
--disabled-login \
--force-badname \
_matrix-conduit
fi
# Create the database path if it does not exist yet and fix up ownership
# and permissions.
mkdir -p "$CONDUIT_DATABASE_PATH"
chown _matrix-conduit "$CONDUIT_DATABASE_PATH"
chmod 700 "$CONDUIT_DATABASE_PATH"
if [ ! -e "$CONDUIT_CONFIG_FILE" ]; then
# Write the debconf values in the config.
db_get matrix-conduit/hostname
CONDUIT_SERVER_NAME="$RET"
db_get matrix-conduit/address
CONDUIT_ADDRESS="$RET"
db_get matrix-conduit/port
CONDUIT_PORT="$RET"
mkdir -p "$CONDUIT_CONFIG_PATH"
cat > "$CONDUIT_CONFIG_FILE" << EOF
[global]
# The server_name is the pretty name of this server. It is used as a suffix for
# user and room ids. Examples: matrix.org, conduit.rs
# The Conduit server needs all /_matrix/ requests to be reachable at
# https://your.server.name/ on port 443 (client-server) and 8448 (federation).
# If that's not possible for you, you can create /.well-known files to redirect
# requests. See
# https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client
# and
# https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server
# for more information
server_name = "${CONDUIT_SERVER_NAME}"
# This is the only directory where Conduit will save its data.
database_path = "${CONDUIT_DATABASE_PATH}"
database_backend = "rocksdb"
# The address Conduit will be listening on.
# By default the server listens on address 0.0.0.0. Change this to 127.0.0.1 to
# only listen on the localhost when using a reverse proxy.
address = "${CONDUIT_ADDRESS}"
# The port Conduit will be running on. You need to set up a reverse proxy in
# your web server (e.g. apache or nginx), so all requests to /_matrix on port
# 443 and 8448 will be forwarded to the Conduit instance running on this port
# Docker users: Don't change this, you'll need to map an external port to this.
port = ${CONDUIT_PORT}
# Max size for uploads
max_request_size = 20_000_000 # in bytes
# Enables registration. If set to false, no users can register on this server.
allow_registration = true
# A static registration token that new users will have to provide when creating
# an account.
# - Insert a password that users will have to enter on registration
# - Start the line with '#' to remove the condition
#registration_token = ""
allow_federation = true
allow_check_for_updates = true
# Enable the display name lightning bolt on registration.
enable_lightning_bolt = true
# Servers listed here will be used to gather public keys of other servers.
# Generally, copying this exactly should be enough. (Currently, Conduit doesn't
# support batched key requests, so this list should only contain Synapse
# servers.)
trusted_servers = ["matrix.org"]
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
# Controls the log verbosity. See also [here][0].
#
# [0]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives
#log = "..."
EOF
fi
;;
esac
#DEBHELPER#

27
debian/postrm vendored Normal file
View file

@ -0,0 +1,27 @@
#!/bin/sh
set -e
. /usr/share/debconf/confmodule
CONDUIT_CONFIG_PATH=/etc/matrix-conduit
CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit
case $1 in
purge)
# Remove debconf changes from the db
db_purge
# Per https://www.debian.org/doc/debian-policy/ch-files.html#behavior
# "configuration files must be preserved when the package is removed, and
# only deleted when the package is purged."
if [ -d "$CONDUIT_CONFIG_PATH" ]; then
rm -r "$CONDUIT_CONFIG_PATH"
fi
if [ -d "$CONDUIT_DATABASE_PATH" ]; then
rm -r "$CONDUIT_DATABASE_PATH"
fi
;;
esac
#DEBHELPER#

21
debian/templates vendored Normal file
View file

@ -0,0 +1,21 @@
Template: matrix-conduit/hostname
Type: string
Default: localhost
Description: The server (host)name of the Matrix homeserver
This is the hostname the homeserver will be reachable at via a client.
.
If set to "localhost", you can connect with a client locally and clients
from other hosts and also other homeservers will not be able to reach you!
Template: matrix-conduit/address
Type: string
Default: 127.0.0.1
Description: The listen address of the Matrix homeserver
This is the address the homeserver will listen on. Leave it set to 127.0.0.1
when using a reverse proxy.
Template: matrix-conduit/port
Type: string
Default: 6167
Description: The port of the Matrix homeserver
This port is most often just accessed by a reverse proxy.
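These templates drive the interactive prompts shown during installation. For unattended installs, the answers can be preseeded before the package is configured; a sketch using standard debconf tooling (the values are placeholders):

```bash
# Preseed the debconf answers so no prompts appear during installation
sudo debconf-set-selections << 'EOF'
matrix-conduit matrix-conduit/hostname string example.com
matrix-conduit matrix-conduit/address string 127.0.0.1
matrix-conduit matrix-conduit/port string 6167
EOF
```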


@ -0,0 +1,84 @@
# syntax=docker/dockerfile:1
# ---------------------------------------------------------------------------------------------------------
# This Dockerfile is intended to be built as part of Conduit's CI pipeline.
# It does not build Conduit in Docker, but just copies the matching build artifact from the build jobs.
#
# It is mostly based on the normal Conduit Dockerfile, but adjusted in a few places to maximise caching.
# Credit for the original Dockerfile goes to Weasy666.
# ---------------------------------------------------------------------------------------------------------
FROM docker.io/alpine:3.16.0@sha256:4ff3ca91275773af45cb4b0834e12b7eb47d1c18f770a0b151381cd227f4c253 AS runner
# Standard port on which Conduit launches.
# You still need to map the port when using the docker command or docker-compose.
EXPOSE 6167
# Users are expected to mount a volume to this directory:
ARG DEFAULT_DB_PATH=/var/lib/matrix-conduit
ENV CONDUIT_PORT=6167 \
CONDUIT_ADDRESS="0.0.0.0" \
CONDUIT_DATABASE_PATH=${DEFAULT_DB_PATH} \
CONDUIT_CONFIG=''
# └─> Set no config file to do all configuration with env vars
# Conduit needs:
# ca-certificates: for https
# iproute2: for `ss` for the healthcheck script
RUN apk add --no-cache \
ca-certificates \
iproute2
ARG CREATED
ARG VERSION
ARG GIT_REF
# Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md
# including a custom label specifying the build command
LABEL org.opencontainers.image.created=${CREATED} \
org.opencontainers.image.authors="Conduit Contributors" \
org.opencontainers.image.title="Conduit" \
org.opencontainers.image.version=${VERSION} \
org.opencontainers.image.vendor="Conduit Contributors" \
org.opencontainers.image.description="A Matrix homeserver written in Rust" \
org.opencontainers.image.url="https://conduit.rs/" \
org.opencontainers.image.revision=${GIT_REF} \
org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \
org.opencontainers.image.licenses="Apache-2.0" \
org.opencontainers.image.documentation="https://gitlab.com/famedly/conduit" \
org.opencontainers.image.ref.name=""
# Test if Conduit is still alive, uses the same endpoint as Element
COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh
HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh
# Improve security: don't run things as root that do not need to run as root:
# Most distros also use 1000:1000 for the first real user, so this should resolve volume mounting problems.
ARG USER_ID=1000
ARG GROUP_ID=1000
RUN set -x ; \
deluser --remove-home www-data ; \
addgroup -S -g ${GROUP_ID} conduit 2>/dev/null ; \
adduser -S -u ${USER_ID} -D -H -h /srv/conduit -G conduit -g conduit conduit 2>/dev/null ; \
addgroup conduit conduit 2>/dev/null && exit 0 ; exit 1
# Change ownership of Conduit files to conduit user and group
RUN chown -cR conduit:conduit /srv/conduit && \
chmod +x /srv/conduit/healthcheck.sh && \
mkdir -p ${DEFAULT_DB_PATH} && \
chown -cR conduit:conduit ${DEFAULT_DB_PATH}
# Change user to conduit
USER conduit
# Set container home directory
WORKDIR /srv/conduit
# Run Conduit and print backtraces on panics
ENV RUST_BACKTRACE=1
ENTRYPOINT [ "/srv/conduit/conduit" ]
# Depending on the target platform (e.g. "linux/arm/v7", "linux/arm64/v8", or "linux/amd64")
# copy the matching binary into this docker image
ARG TARGETPLATFORM
COPY --chown=conduit:conduit ./$TARGETPLATFORM /srv/conduit/conduit
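Because the binary is copied in per `$TARGETPLATFORM`, this Dockerfile is meant to be built with BuildKit/buildx from a context that already contains one prebuilt binary per platform directory. A hedged sketch (the directory layout and tag are assumptions, not part of the CI pipeline itself):

```bash
# Assumes the build context contains e.g. ./linux/amd64/... and ./linux/arm64/...
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  --tag matrixconduit/matrix-conduit:local-ci \
  .
```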

docker/healthcheck.sh Normal file

@ -0,0 +1,19 @@
#!/bin/sh
# If the config file does not contain a port and the CONDUIT_PORT env var is not set,
# try to get the port from the process list
if [ -z "${CONDUIT_PORT}" ]; then
CONDUIT_PORT=$(ss -tlpn | grep conduit | grep -m1 -o ':[0-9]*' | grep -m1 -o '[0-9]*')
fi
# If CONDUIT_ADDRESS is not set, try to get the address from the process list
if [ -z "${CONDUIT_ADDRESS}" ]; then
CONDUIT_ADDRESS=$(ss -tlpn | awk -F ' +|:' '/conduit/ { print $4 }')
fi
# The actual health check.
# We first try to get a response over HTTP; if that fails, we try HTTPS, and if that also fails, we exit with code 1.
# TODO: Change this to a single wget call. Do we have a config value that we can check for that?
wget --no-verbose --tries=1 --spider "http://${CONDUIT_ADDRESS}:${CONDUIT_PORT}/_matrix/client/versions" || \
wget --no-verbose --tries=1 --spider "https://${CONDUIT_ADDRESS}:${CONDUIT_PORT}/_matrix/client/versions" || \
exit 1
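Docker runs this script via the `HEALTHCHECK` instruction in the Dockerfile above, and the result can be read back from the host. A small sketch, assuming the container is named `conduit`:

```bash
# Show the health status the healthcheck script last reported
docker inspect --format '{{.State.Health.Status}}' conduit
```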

docs/SUMMARY.md Normal file

@ -0,0 +1,12 @@
# Summary
- [Introduction](introduction.md)
- [Configuration](configuration.md)
- [Deploying](deploying.md)
- [Generic](deploying/generic.md)
- [Debian](deploying/debian.md)
- [Docker](deploying/docker.md)
- [NixOS](deploying/nixos.md)
- [TURN](turn.md)
- [Appservices](appservices.md)

docs/appservices.md Normal file

@ -0,0 +1,61 @@
# Setting up Appservices
## Getting help
If you run into any problems while setting up an Appservice, write an email to `timo@koesters.xyz`, ask us in [#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org) or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new).
## Set up the appservice - general instructions
Follow whatever instructions are given by the appservice. This usually includes
downloading it, changing its config (setting domain, homeserver URL, port, etc.)
and later starting it.
At some point the appservice guide should ask you to add a registration yaml
file to the homeserver. In Synapse you would do this by adding the path to the
homeserver.yaml, but in Conduit you can do this from within Matrix:
First, go into the #admins room of your homeserver. The first person that
registered on the homeserver automatically joins it. Then send a message into
the room like this:
@conduit:your.server.name: register-appservice
```
paste
the
contents
of
the
yaml
registration
here
```
You can confirm it worked by sending a message like this:
`@conduit:your.server.name: list-appservices`
The @conduit bot should answer with `Appservices (1): your-bridge`
Then you are done. Conduit will send messages to the appservices and the
appservice can send requests to the homeserver. You don't need to restart
Conduit, but if it doesn't work, restarting while the appservice is running
could help.
## Appservice-specific instructions
### Remove an appservice
To remove an appservice go to your admin room and execute
`@conduit:your.server.name: unregister-appservice <name>`
where `<name>` is one of the names output by `list-appservices`.
### Tested appservices
These appservices have been tested and work with Conduit without any extra steps:
- [matrix-appservice-discord](https://github.com/Half-Shot/matrix-appservice-discord)
- [mautrix-hangouts](https://github.com/mautrix/hangouts/)
- [mautrix-telegram](https://github.com/mautrix/telegram/)
- [mautrix-signal](https://github.com/mautrix/signal/) from version `0.2.2` forward.
- [heisenbridge](https://github.com/hifi/heisenbridge/)

docs/configuration.md Normal file

@ -0,0 +1,110 @@
# Configuration
**Conduit** is configured using a TOML file. The configuration file is loaded from the path specified by the `CONDUIT_CONFIG` environment variable.
> **Note:** The configuration file is required to run Conduit. If the `CONDUIT_CONFIG` environment variable is not set, Conduit will exit with an error.
> **Note:** If you update the configuration file, you must restart Conduit for the changes to take effect
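In practice this means setting the variable when starting the server. A minimal sketch, assuming the binary is installed as `/usr/local/bin/matrix-conduit` (as in the generic deployment guide):

```bash
# Point Conduit at its config file and start it
CONDUIT_CONFIG=/etc/matrix-conduit/conduit.toml /usr/local/bin/matrix-conduit
```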
Conduit's configuration file is divided into the following sections:
- [Global](#global)
- [TLS](#tls)
- [Proxy](#proxy)
## Global
The `global` section contains the following fields:
> **Note:** The `*` symbol indicates that the field is required, and the values in **parentheses** are the possible values
| Field | Type | Description | Default |
| --- | --- | --- | --- |
| `address` | `string` | The address to bind to | `"127.0.0.1"` |
| `port` | `integer` | The port to bind to | `8000` |
| `tls` | `table` | See the [TLS configuration](#tls) | N/A |
| `server_name`_*_ | `string` | The server name | N/A |
| `database_backend`_*_ | `string` | The database backend to use (`"rocksdb"` *recommended*, `"sqlite"`) | N/A |
| `database_path`_*_ | `string` | The path to the database file/dir | N/A |
| `db_cache_capacity_mb` | `float` | The cache capacity, in MB | `300.0` |
| `enable_lightning_bolt` | `boolean` | Add `⚡️` emoji to end of user's display name | `true` |
| `allow_check_for_updates` | `boolean` | Allow Conduit to check for updates | `true` |
| `conduit_cache_capacity_modifier` | `float` | The value to multiply the default cache capacity by | `1.0` |
| `rocksdb_max_open_files` | `integer` | The maximum number of open files | `1000` |
| `pdu_cache_capacity` | `integer` | The maximum number of Persisted Data Units (PDUs) to cache | `150000` |
| `cleanup_second_interval` | `integer` | How often conduit should clean up the database, in seconds | `60` |
| `max_request_size` | `integer` | The maximum request size, in bytes | `20971520` (20 MiB) |
| `max_concurrent_requests` | `integer` | The maximum number of concurrent requests | `100` |
| `max_fetch_prev_events` | `integer` | The maximum number of previous events to fetch per request if conduit notices events are missing | `100` |
| `allow_registration` | `boolean` | Opens your homeserver to public registration | `false` |
| `registration_token` | `string` | The token users need to have when registering to your homeserver | N/A |
| `allow_encryption` | `boolean` | Allow users to enable encryption in their rooms | `true` |
| `allow_federation` | `boolean` | Allow federation with other servers | `true` |
| `allow_room_creation` | `boolean` | Allow users to create rooms | `true` |
| `allow_unstable_room_versions` | `boolean` | Allow users to create and join rooms with unstable versions | `true` |
| `default_room_version` | `string` | The default room version (`"6"`-`"10"`) | `"10"` |
| `allow_jaeger` | `boolean` | Allow Jaeger tracing | `false` |
| `tracing_flame` | `boolean` | Enable flame tracing | `false` |
| `proxy` | `table` | See the [Proxy configuration](#proxy) | N/A |
| `jwt_secret` | `string` | The secret used in the JWT to enable JWT login; without it, a 400 error will be returned | N/A |
| `trusted_servers` | `array` | The list of trusted servers to gather public keys of offline servers | `["matrix.org"]` |
| `log` | `string` | The log verbosity to use | `"warn"` |
| `turn_username` | `string` | The TURN username | `""` |
| `turn_password` | `string` | The TURN password | `""` |
| `turn_uris` | `array` | The TURN URIs | `[]` |
| `turn_secret` | `string` | The TURN secret | `""` |
| `turn_ttl` | `integer` | The TURN TTL in seconds | `86400` |
| `emergency_password` | `string` | Set a password to login as the `conduit` user in case of emergency | N/A |
### TLS
The `tls` table contains the following fields:
- `certs`: The path to the public PEM certificate
- `key`: The path to the PEM private key
#### Example
```toml
[global.tls]
certs = "/path/to/cert.pem"
key = "/path/to/key.pem"
```
### Proxy
You can choose which requests Conduit should proxy (if any). The `proxy` table contains the following fields:
#### Global
The global option will proxy all outgoing requests. The `global` table contains the following fields:
- `url`: The URL of the proxy server
##### Example
```toml
[global.proxy.global]
url = "https://example.com"
```
#### By domain
An array of tables that contain the following fields:
- `url`: The URL of the proxy server
- `include`: Domains that should be proxied (assumed to be `["*"]` if unset)
- `exclude`: Domains that should not be proxied (takes precedence over `include`)
Both `include` and `exclude` allow for glob pattern matching.
##### Example
In this example, all requests to domains ending in `.onion` and to `matrix.secretly-an-onion-domain.xyz`
will be proxied via `socks5://localhost:9050`, except for domains ending in `.clearnet.onion`. You can add as many `by_domain` tables as you need.
```toml
[[global.proxy.by_domain]]
url = "socks5://localhost:9050"
include = ["*.onion", "matrix.secretly-an-onion-domain.xyz"]
exclude = ["*.clearnet.onion"]
```
### Example
> **Note:** The following example is a minimal configuration file. You should replace the values with your own.
```toml
[global]
{{#include ../conduit-example.toml:22:}}
```

docs/deploying.md Normal file

@ -0,0 +1,3 @@
# Deploying
This chapter describes various ways to deploy Conduit.

docs/deploying/debian.md Normal file

@ -0,0 +1 @@
{{#include ../../debian/README.md}}


@ -0,0 +1,69 @@
# Conduit - Behind Traefik Reverse Proxy
version: '3'
services:
homeserver:
### If you already built the Conduit image with 'docker build' or want to use the Docker Hub image,
### then you are ready to go.
image: matrixconduit/matrix-conduit:latest
### If you want to build a fresh image from the sources, then comment the image line and uncomment the
### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this:
### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d
# build:
# context: .
# args:
# CREATED: '2021-03-16T08:18:27Z'
# VERSION: '0.1.0'
# LOCAL: 'false'
# GIT_REF: origin/master
restart: unless-stopped
volumes:
- db:/var/lib/matrix-conduit/
networks:
- proxy
environment:
CONDUIT_SERVER_NAME: your.server.name # EDIT THIS
CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/
CONDUIT_DATABASE_BACKEND: rocksdb
CONDUIT_PORT: 6167
CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
CONDUIT_ALLOW_REGISTRATION: 'true'
#CONDUIT_REGISTRATION_TOKEN: '' # require a token for registration
CONDUIT_ALLOW_FEDERATION: 'true'
CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
#CONDUIT_MAX_CONCURRENT_REQUESTS: 100
CONDUIT_ADDRESS: 0.0.0.0
CONDUIT_CONFIG: '' # Ignore this
# We need some way to serve the client and server .well-known json. The simplest way is to use an nginx container
# to serve those two as static files. If you want to use a different way, delete or comment out the below service, here
# and in the docker-compose override file.
well-known:
image: nginx:latest
restart: unless-stopped
volumes:
- ./nginx/matrix.conf:/etc/nginx/conf.d/matrix.conf # the config to serve the .well-known/matrix files
- ./nginx/www:/var/www/ # location of the client and server .well-known-files
### Uncomment if you want to use your own Element-Web App.
### Note: You need to provide a config.json for Element and you also need a second
### Domain or Subdomain for the communication between Element and Conduit
### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md
# element-web:
# image: vectorim/element-web:latest
# restart: unless-stopped
# volumes:
# - ./element_config.json:/app/config.json
# networks:
# - proxy
# depends_on:
# - homeserver
volumes:
db:
networks:
# This is the network Traefik listens to, if your network has a different
# name, don't forget to change it here and in the docker-compose.override.yml
proxy:
external: true


@ -0,0 +1,45 @@
# Conduit - Traefik Reverse Proxy Labels
version: '3'
services:
homeserver:
labels:
- "traefik.enable=true"
- "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network
- "traefik.http.routers.to-conduit.rule=Host(`<SUBDOMAIN>.<DOMAIN>`)" # Change to the address on which Conduit is hosted
- "traefik.http.routers.to-conduit.tls=true"
- "traefik.http.routers.to-conduit.tls.certresolver=letsencrypt"
- "traefik.http.routers.to-conduit.middlewares=cors-headers@docker"
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowOriginList=*"
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization"
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowMethods=GET, POST, PUT, DELETE, OPTIONS"
# We need some way to serve the client and server .well-known json. The simplest way is to use an nginx container
# to serve those two as static files. If you want to use a different way, delete or comment out the below service, here
# and in the docker-compose file.
well-known:
labels:
- "traefik.enable=true"
- "traefik.docker.network=proxy"
- "traefik.http.routers.to-matrix-wellknown.rule=Host(`<SUBDOMAIN>.<DOMAIN>`) && PathPrefix(`/.well-known/matrix`)"
- "traefik.http.routers.to-matrix-wellknown.tls=true"
- "traefik.http.routers.to-matrix-wellknown.tls.certresolver=letsencrypt"
- "traefik.http.routers.to-matrix-wellknown.middlewares=cors-headers@docker"
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowOriginList=*"
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization"
- "traefik.http.middlewares.cors-headers.headers.accessControlAllowMethods=GET, POST, PUT, DELETE, OPTIONS"
### Uncomment this if you uncommented Element-Web App in the docker-compose.yml
# element-web:
# labels:
# - "traefik.enable=true"
# - "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network
# - "traefik.http.routers.to-element-web.rule=Host(`<SUBDOMAIN>.<DOMAIN>`)" # Change to the address on which Element-Web is hosted
# - "traefik.http.routers.to-element-web.tls=true"
# - "traefik.http.routers.to-element-web.tls.certresolver=letsencrypt"


@ -0,0 +1,96 @@
# Conduit - Behind Traefik Reverse Proxy
version: '3'
services:
homeserver:
### If you already built the Conduit image with 'docker build' or want to use the Docker Hub image,
### then you are ready to go.
image: matrixconduit/matrix-conduit:latest
### If you want to build a fresh image from the sources, then comment the image line and uncomment the
### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this:
### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d
# build:
# context: .
# args:
# CREATED: '2021-03-16T08:18:27Z'
# VERSION: '0.1.0'
# LOCAL: 'false'
# GIT_REF: origin/master
restart: unless-stopped
volumes:
- db:/srv/conduit/.local/share/conduit
### Uncomment if you want to use conduit.toml to configure Conduit
### Note: Set env vars will override conduit.toml values
# - ./conduit.toml:/srv/conduit/conduit.toml
networks:
- proxy
environment:
CONDUIT_SERVER_NAME: localhost:6167 # replace with your own name
CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
CONDUIT_ALLOW_REGISTRATION: 'true'
### Uncomment and change values as desired
# CONDUIT_ADDRESS: 0.0.0.0
# CONDUIT_PORT: 6167
# CONDUIT_REGISTRATION_TOKEN: '' # require a token for registration
# CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string ''
# Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
# CONDUIT_ALLOW_ENCRYPTION: 'true'
# CONDUIT_ALLOW_FEDERATION: 'true'
# CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
# CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit
# CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
# We need some way to serve the client and server .well-known json. The simplest way is to use an nginx container
# to serve those two as static files. If you want to use a different way, delete or comment out the below service, here
# and in the docker-compose override file.
well-known:
image: nginx:latest
restart: unless-stopped
volumes:
- ./nginx/matrix.conf:/etc/nginx/conf.d/matrix.conf # the config to serve the .well-known/matrix files
- ./nginx/www:/var/www/ # location of the client and server .well-known-files
### Uncomment if you want to use your own Element-Web App.
### Note: You need to provide a config.json for Element and you also need a second
### Domain or Subdomain for the communication between Element and Conduit
### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md
# element-web:
# image: vectorim/element-web:latest
# restart: unless-stopped
# volumes:
# - ./element_config.json:/app/config.json
# networks:
# - proxy
# depends_on:
# - homeserver
traefik:
image: "traefik:latest"
container_name: "traefik"
restart: "unless-stopped"
ports:
- "80:80"
- "443:443"
volumes:
- "/var/run/docker.sock:/var/run/docker.sock"
# - "./traefik_config:/etc/traefik"
- "acme:/etc/traefik/acme"
labels:
- "traefik.enable=true"
# middleware redirect
- "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https"
# global redirect to https
- "traefik.http.routers.redirs.rule=hostregexp(`{host:.+}`)"
- "traefik.http.routers.redirs.entrypoints=http"
- "traefik.http.routers.redirs.middlewares=redirect-to-https"
networks:
- proxy
volumes:
db:
acme:
networks:
proxy:


@ -0,0 +1,53 @@
# Conduit
version: '3'
services:
homeserver:
### If you already built the Conduit image with 'docker build' or want to use a registry image,
### then you are ready to go.
image: matrixconduit/matrix-conduit:latest
### If you want to build a fresh image from the sources, then comment the image line and uncomment the
### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this:
### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d
# build:
# context: .
# args:
# CREATED: '2021-03-16T08:18:27Z'
# VERSION: '0.1.0'
# LOCAL: 'false'
# GIT_REF: origin/master
restart: unless-stopped
ports:
- 8448:6167
volumes:
- db:/var/lib/matrix-conduit/
environment:
CONDUIT_SERVER_NAME: your.server.name # EDIT THIS
CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/
CONDUIT_DATABASE_BACKEND: rocksdb
CONDUIT_PORT: 6167
CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
CONDUIT_ALLOW_REGISTRATION: 'true'
CONDUIT_ALLOW_FEDERATION: 'true'
CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
#CONDUIT_MAX_CONCURRENT_REQUESTS: 100
CONDUIT_ADDRESS: 0.0.0.0
CONDUIT_CONFIG: '' # Ignore this
#
### Uncomment if you want to use your own Element-Web App.
### Note: You need to provide a config.json for Element and you also need a second
### Domain or Subdomain for the communication between Element and Conduit
### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md
# element-web:
# image: vectorim/element-web:latest
# restart: unless-stopped
# ports:
# - 8009:80
# volumes:
# - ./element_config.json:/app/config.json
# depends_on:
# - homeserver
volumes:
db:

docs/deploying/docker.md Normal file

@ -0,0 +1,216 @@
# Conduit for Docker
> **Note:** To run and use Conduit you should probably use it with a domain or subdomain behind a reverse proxy (like Nginx, Traefik, Apache, ...) with a Let's Encrypt certificate.
## Docker
To run Conduit with Docker you can either build the image yourself or pull it from a registry.
### Use a registry
OCI images for Conduit are available in the registries listed below. We recommend using the image tagged as `latest` from GitLab's own registry.
| Registry | Image | Size | Notes |
| --------------- | --------------------------------------------------------------- | ----------------------------- | ---------------------- |
| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:latest][gl] | ![Image Size][shield-latest] | Stable image. |
| Docker Hub | [docker.io/matrixconduit/matrix-conduit:latest][dh] | ![Image Size][shield-latest] | Stable image. |
| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:next][gl] | ![Image Size][shield-next] | Development version. |
| Docker Hub | [docker.io/matrixconduit/matrix-conduit:next][dh] | ![Image Size][shield-next] | Development version. |
[dh]: https://hub.docker.com/r/matrixconduit/matrix-conduit
[gl]: https://gitlab.com/famedly/conduit/container_registry/2497937
[shield-latest]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest
[shield-next]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/next
Use
```bash
docker image pull <link>
```
to pull it to your machine.
### Build using a dockerfile
The Dockerfile provided by Conduit has two stages, each of which creates an image.
1. **Builder:** Builds the binary from local context or by cloning a git revision from the official repository.
2. **Runner:** Copies the built binary from **Builder** and sets up the runtime environment, like creating a volume to persist the database and applying the correct permissions.
To build the image you can use the following command
```bash
docker build --tag matrixconduit/matrix-conduit:latest .
```
which will also tag the resulting image as `matrixconduit/matrix-conduit:latest`.
### Run
When you have the image you can simply run it with
```bash
docker run -d -p 8448:6167 \
-v db:/var/lib/matrix-conduit/ \
-e CONDUIT_SERVER_NAME="your.server.name" \
-e CONDUIT_DATABASE_BACKEND="rocksdb" \
-e CONDUIT_ALLOW_REGISTRATION=true \
-e CONDUIT_ALLOW_FEDERATION=true \
-e CONDUIT_MAX_REQUEST_SIZE="20_000_000" \
-e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \
-e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \
--name conduit <link>
```
or you can use [docker-compose](#docker-compose).
The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file; an example can be found [here](../configuration.md).
You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need
to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible values, please take a look at the `docker-compose.yml` file.
If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it.
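Putting those pieces together, a throwaway, env-var-only test instance could look like this (a sketch; the values are placeholders):

```bash
# Disposable test instance, configured entirely via env vars
docker run --rm -p 8448:6167 \
  -e CONDUIT_CONFIG="" \
  -e CONDUIT_SERVER_NAME="your.server.name" \
  -e CONDUIT_ALLOW_REGISTRATION=true \
  matrixconduit/matrix-conduit:latest
```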
### Docker-compose
If the `docker run` command is not for you or your setup, you can also use one of the provided `docker-compose` files.
Depending on your proxy setup, you can use one of the following files:
- If you already have a `traefik` instance set up, use [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml)
- If you don't have a `traefik` instance set up (or any other reverse proxy), use [`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)
- For any other reverse proxy, use [`docker-compose.yml`](docker-compose.yml)
When picking the traefik-related compose file, rename it so it matches `docker-compose.yml`, and
rename the override file to `docker-compose.override.yml`. Edit the latter with the values you want
for your server.
Additional info about deploying Conduit can be found [here](generic.md).
### Build
To build the Conduit image with docker-compose, you first need to open and modify the `docker-compose.yml` file. There you need to comment out the `image:` option and uncomment the `build:` option. Then call docker-compose with:
```bash
docker-compose up
```
This will also start the container right afterwards, so if you want it to run in detached mode, you should also use the `-d` flag.
### Run
If you already have built the image or want to use one from the registries, you can just start the container and everything else in the compose file in detached mode with:
```bash
docker-compose up -d
```
> **Note:** Don't forget to modify and adjust the compose file to your needs.
### Use Traefik as Proxy
As a container user, you probably know about Traefik. It is an easy-to-use reverse proxy for making
containerized apps and services available through the web. With the two provided files,
[`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or
[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and
[`docker-compose.override.yml`](docker-compose.override.yml), it is equally easy to deploy
and use Conduit, with one little caveat. If you already took a look at the files, you will have
seen the `well-known` service, and that is the caveat. Traefik is simply a proxy and
load balancer and is not able to serve any content itself, so for Conduit to federate, we need to
either expose ports `443` and `8448` or serve the two endpoints `.well-known/matrix/client` and
`.well-known/matrix/server`.
With the service `well-known` we use a single `nginx` container that will serve those two files.
So...step by step:
1. Copy [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or
[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and [`docker-compose.override.yml`](docker-compose.override.yml) from the repository and remove `.for-traefik` (or `.with-traefik`) from the filename.
2. Open both files and modify/adjust them to your needs. Meaning, change the `CONDUIT_SERVER_NAME` and the volume host mappings according to your needs.
3. Create the `conduit.toml` config file, an example can be found [here](../configuration.md), or set `CONDUIT_CONFIG=""` and configure Conduit per env vars.
4. Uncomment the `element-web` service if you want to host your own Element Web Client and create a `element_config.json`.
5. Create the files needed by the `well-known` service.
- `./nginx/matrix.conf` (relative to the compose file, you can change this, but then also need to change the volume mapping)
```nginx
server {
server_name <SUBDOMAIN>.<DOMAIN>;
listen 80 default_server;
location /.well-known/matrix/server {
return 200 '{"m.server": "<SUBDOMAIN>.<DOMAIN>:443"}';
types { } default_type "application/json; charset=utf-8";
}
location /.well-known/matrix/client {
return 200 '{"m.homeserver": {"base_url": "https://<SUBDOMAIN>.<DOMAIN>"}}';
types { } default_type "application/json; charset=utf-8";
add_header "Access-Control-Allow-Origin" *;
}
location / {
return 404;
}
}
```
6. Run `docker-compose up -d`
7. Connect to your homeserver with your preferred client and create a user. You should do this immediately after starting Conduit, because the first created user is the admin.
## Voice communication
In order to make or receive calls, a TURN server is required. Conduit suggests using [Coturn](https://github.com/coturn/coturn) for this purpose, which is also available as a Docker image. Before proceeding with the software installation, it is essential to have the necessary configurations in place.
### Configuration
Create a configuration file called `coturn.conf` containing:
```conf
use-auth-secret
static-auth-secret=<a secret key>
realm=<your server domain>
```
A common way to generate a suitable alphanumeric secret key is by using `pwgen -s 64 1`.
These same values need to be set in Conduit. You can either modify conduit.toml to include these lines:
```
turn_uris = ["turn:<your server domain>?transport=udp", "turn:<your server domain>?transport=tcp"]
turn_secret = "<secret key from coturn configuration>"
```
or append the following to the docker environment variables depending on which configuration method you used earlier:
```yml
CONDUIT_TURN_URIS: '["turn:<your server domain>?transport=udp", "turn:<your server domain>?transport=tcp"]'
CONDUIT_TURN_SECRET: "<secret key from coturn configuration>"
```
Restart Conduit to apply these changes.
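If Conduit itself runs in Docker, restarting the container is enough; a sketch assuming the container from the `docker run` example above, named `conduit`:

```bash
docker restart conduit
```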
### Run
Run the [Coturn](https://hub.docker.com/r/coturn/coturn) image using
```bash
docker run -d --network=host -v $(pwd)/coturn.conf:/etc/coturn/turnserver.conf coturn/coturn
```
or docker-compose. For the latter, paste the following section into a file called `docker-compose.yml`
and run `docker-compose up -d` in the same directory.
```yml
version: '3'
services:
turn:
container_name: coturn-server
image: docker.io/coturn/coturn
restart: unless-stopped
network_mode: "host"
volumes:
- ./coturn.conf:/etc/coturn/turnserver.conf
```
To understand why the host networking mode is used and explore alternative configuration options, please visit the following link: https://github.com/coturn/coturn/blob/master/docker/coturn/README.md.
For security recommendations see Synapse's [Coturn documentation](https://github.com/matrix-org/synapse/blob/develop/docs/setup/turn/coturn.md#configuration).

docs/deploying/generic.md Normal file

@ -0,0 +1,292 @@
# Generic deployment documentation
> ## Getting help
>
> If you run into any problems while setting up Conduit, write an email to `conduit@koesters.xyz`, ask us
> in `#conduit:fachschaften.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new).
## Installing Conduit
Although you might be able to compile Conduit for Windows, we do recommend running it on a Linux server. We therefore
only offer Linux binaries.
You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the appropriate URL:
**Stable versions:**
| CPU Architecture | Download stable version |
| ------------------------------------------- | --------------------------------------------------------------- |
| x86_64 / amd64 (Most servers and computers) | [Binary][x86_64-glibc-master] / [.deb][x86_64-glibc-master-deb] |
| armv7 (e.g. Raspberry Pi by default) | [Binary][armv7-glibc-master] / [.deb][armv7-glibc-master-deb] |
| armv8 / aarch64 | [Binary][armv8-glibc-master] / [.deb][armv8-glibc-master-deb] |
These builds were created on and linked against the glibc version shipped with Debian bullseye.
If you use a system with an older glibc version (e.g. RHEL8), you might need to compile Conduit yourself.
[x86_64-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit?job=docker:master
[armv7-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit?job=docker:master
[armv8-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit?job=docker:master
[x86_64-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit.deb?job=docker:master
[armv7-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit.deb?job=docker:master
[armv8-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit.deb?job=docker:master
**Latest versions:**
| Target | Type | Download |
|-|-|-|
| `x86_64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl.deb?job=artifacts) |
| `x86_64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl?job=artifacts) |
| `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/aarch64-unknown-linux-musl?job=artifacts) |
| `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-amd64.tar.gz?job=artifacts) |
| `aarch64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-arm64v8.tar.gz?job=artifacts) |
```bash
$ sudo wget -O /usr/local/bin/matrix-conduit <url>
$ sudo chmod +x /usr/local/bin/matrix-conduit
```
Alternatively, you may compile the binary yourself. First, install any dependencies:
```bash
# Debian
$ sudo apt install libclang-dev build-essential
# RHEL
$ sudo dnf install clang
```
Then, `cd` into the source tree of conduit-next and run:
```bash
$ cargo build --release
```
## Adding a Conduit user
While Conduit can run as any user, it is usually better to use dedicated users for different services. This also allows
you to make sure that the file permissions are set up correctly.
In Debian or RHEL, you can use this command to create a Conduit user:
```bash
sudo adduser --system conduit --group --disabled-login --no-create-home
```
## Forwarding ports in the firewall or the router
Conduit uses ports 443 and 8448, both of which need to be open in the firewall.
If Conduit runs behind a router or in a container and has a different public IP address than the host system, these public ports need to be forwarded directly or indirectly to the port mentioned in the config.
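How you open these ports depends on your firewall. A sketch for two common tools (assuming `ufw` on Debian-like and `firewalld` on RHEL-like systems):

```bash
# ufw (Debian-like)
sudo ufw allow 443/tcp
sudo ufw allow 8448/tcp

# firewalld (RHEL-like)
sudo firewall-cmd --permanent --add-port=443/tcp --add-port=8448/tcp
sudo firewall-cmd --reload
```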
## Optional: Avoid port 8448
If Conduit runs behind Cloudflare's reverse proxy, which doesn't support port 8448 on free plans, [delegation](https://matrix-org.github.io/synapse/latest/delegate.html) can be set up to have federation traffic routed to port 443:
```apache
# .well-known delegation on Apache
<Files "/.well-known/matrix/server">
ErrorDocument 200 '{"m.server": "your.server.name:443"}'
Header always set Content-Type application/json
Header always set Access-Control-Allow-Origin *
</Files>
```
[SRV DNS record](https://spec.matrix.org/latest/server-server-api/#resolving-server-names) delegation is also [possible](https://www.cloudflare.com/en-gb/learning/dns/dns-records/dns-srv-record/).
## Setting up a systemd service
Now we'll set up a systemd service for Conduit, so it's easy to start/stop Conduit and set it to autostart when your
server reboots. Simply paste the default systemd service you can find below into
`/etc/systemd/system/conduit.service`.
```systemd
[Unit]
Description=Conduit Matrix Server
After=network.target
[Service]
Environment="CONDUIT_CONFIG=/etc/matrix-conduit/conduit.toml"
User=conduit
Group=conduit
Restart=always
ExecStart=/usr/local/bin/matrix-conduit
[Install]
WantedBy=multi-user.target
```
Finally, run
```bash
$ sudo systemctl daemon-reload
```
## Creating the Conduit configuration file
Now we need to create Conduit's config file in
`/etc/matrix-conduit/conduit.toml`. Paste in the contents of
[`conduit-example.toml`](../configuration.md) **and take a moment to read it.
You need to change at least the server name.**
You can also choose to use a different database backend, but right now only `rocksdb` and `sqlite` are recommended.
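If you prefer to script this step, a minimal sketch that writes a skeleton config (the values are placeholders you must edit, in particular the server name):

```bash
sudo mkdir -p /etc/matrix-conduit
sudo tee /etc/matrix-conduit/conduit.toml > /dev/null << 'EOF'
[global]
server_name = "your.server.name"   # EDIT THIS
database_backend = "rocksdb"
database_path = "/var/lib/matrix-conduit/"
address = "127.0.0.1"
port = 6167
allow_registration = false
EOF
```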
## Setting the correct file permissions
As we are using a Conduit-specific user, we need to allow it to read the config. To do that you can run this command on
Debian or RHEL:
```bash
sudo chown -R root:root /etc/matrix-conduit
sudo chmod 755 /etc/matrix-conduit
```
If you use the default database path you also need to run this:
```bash
sudo mkdir -p /var/lib/matrix-conduit/
sudo chown -R conduit:conduit /var/lib/matrix-conduit/
sudo chmod 700 /var/lib/matrix-conduit/
```
## Setting up the Reverse Proxy
This depends on whether you use Apache, Caddy, Nginx or another web server.
### Apache
Create `/etc/apache2/sites-enabled/050-conduit.conf` and copy-and-paste this:
```apache
# Requires mod_proxy and mod_proxy_http
#
# On an Apache instance compiled from source,
# paste into httpd-ssl.conf or httpd.conf
Listen 8448
<VirtualHost *:443 *:8448>
ServerName your.server.name # EDIT THIS
AllowEncodedSlashes NoDecode
ProxyPass /_matrix/ http://127.0.0.1:6167/_matrix/ timeout=300 nocanon
ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/
</VirtualHost>
```
**You need to make some edits again.** When you are done, run
```bash
# Debian
$ sudo systemctl reload apache2
# Installed from source
$ sudo apachectl -k graceful
```
### Caddy
Create `/etc/caddy/conf.d/conduit_caddyfile` and enter this (substitute for your server name).
```caddy
your.server.name, your.server.name:8448 {
reverse_proxy /_matrix/* 127.0.0.1:6167
}
```
That's it! Just start or enable the service and you're set.
```bash
$ sudo systemctl enable caddy
```
### Nginx
If you use Nginx and not Apache, add the following server section inside the http section of `/etc/nginx/nginx.conf`
```nginx
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
listen 8448 ssl http2;
listen [::]:8448 ssl http2;
server_name your.server.name; # EDIT THIS
merge_slashes off;
# Nginx defaults to only allow 1MB uploads
# Increase this to allow posting large files such as videos
client_max_body_size 20M;
location /_matrix/ {
proxy_pass http://127.0.0.1:6167;
proxy_set_header Host $http_host;
proxy_buffering off;
proxy_read_timeout 5m;
}
ssl_certificate /etc/letsencrypt/live/your.server.name/fullchain.pem; # EDIT THIS
ssl_certificate_key /etc/letsencrypt/live/your.server.name/privkey.pem; # EDIT THIS
ssl_trusted_certificate /etc/letsencrypt/live/your.server.name/chain.pem; # EDIT THIS
include /etc/letsencrypt/options-ssl-nginx.conf;
}
```
**You need to make some edits again.** When you are done, run
```bash
$ sudo systemctl reload nginx
```
## SSL Certificate
If you chose Caddy as your web proxy, SSL certificates are handled automatically and you can skip this step.
The easiest way to get an SSL certificate, if you don't have one already, is to [install](https://certbot.eff.org/instructions) `certbot` and run this:
```bash
# To use ECC for the private key,
# paste into /etc/letsencrypt/cli.ini:
# key-type = ecdsa
# elliptic-curve = secp384r1
$ sudo certbot -d your.server.name
```
[Automated renewal](https://eff-certbot.readthedocs.io/en/stable/using.html#automated-renewals) is usually preconfigured.
If using Cloudflare, configure the edge and origin certificates in the dashboard instead. In case you're already running a website on the same Apache server, you can just copy-and-paste the SSL configuration from your main virtual host on port 443 into the above-mentioned vhost.
## You're done!
Now you can start Conduit with:
```bash
$ sudo systemctl start conduit
```
Set it to start automatically when your system boots with:
```bash
$ sudo systemctl enable conduit
```
## How do I know it works?
You can open [a Matrix client](https://matrix.org/ecosystem/clients), enter your homeserver and try to register. If you are using a registration token, use [Element web](https://app.element.io/), [Nheko](https://matrix.org/ecosystem/clients/nheko/) or [SchildiChat web](https://app.schildi.chat/), as they support this feature.
You can also use these commands as a quick health check.
```bash
$ curl https://your.server.name/_matrix/client/versions
# If using port 8448
$ curl https://your.server.name:8448/_matrix/client/versions
```
- To check if your server can talk with other homeservers, you can use the [Matrix Federation Tester](https://federationtester.matrix.org/).
If you can register but cannot join federated rooms, check your config again and also check whether port 8448 is open and forwarded correctly.
# What's next?
## Audio/Video calls
For Audio/Video call functionality see the [TURN Guide](../turn.md).
## Appservices
If you want to set up an appservice, take a look at the [Appservice Guide](../appservices.md).

docs/deploying/nixos.md Normal file

@ -0,0 +1,18 @@
# Conduit for NixOS
Conduit can be acquired by Nix from various places:
* The `flake.nix` at the root of the repo
* The `default.nix` at the root of the repo
* From Nixpkgs
The `flake.nix` and `default.nix` do not (currently) provide a NixOS module, so
(for now) [`services.matrix-conduit`][module] from Nixpkgs should be used to
configure Conduit.
If you want to run the latest code, you should get Conduit from the `flake.nix`
or `default.nix` and set [`services.matrix-conduit.package`][package]
appropriately.
[module]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit
[package]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit.package
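For the `flake.nix` route, fetching and building the package looks roughly like this (a hedged sketch; it assumes Nix with flakes enabled and uses the GitLab repository as the flake ref):

```bash
# Build Conduit from the repository's flake; the result appears under ./result
nix build "gitlab:famedly/conduit"
ls ./result/bin
```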

docs/introduction.md Normal file

@ -0,0 +1,13 @@
# Conduit
{{#include ../README.md:catchphrase}}
{{#include ../README.md:body}}
#### How can I deploy my own?
- [Deployment options](deploying.md)
If you want to connect an Appservice to Conduit, take a look at the [appservices documentation](appservices.md).
{{#include ../README.md:footer}}

docs/turn.md Normal file

@ -0,0 +1,25 @@
# Setting up TURN/STUN
## General instructions
* It is assumed you have a [Coturn server](https://github.com/coturn/coturn) up and running. See [Synapse reference implementation](https://github.com/matrix-org/synapse/blob/develop/docs/turn-howto.md).
## Edit/Add a few settings to your existing conduit.toml
```
# Refer to your Coturn settings.
# `your.turn.url` has to match the REALM setting of your Coturn as well as `transport`.
turn_uris = ["turn:your.turn.url?transport=udp", "turn:your.turn.url?transport=tcp"]
# static-auth-secret of your turnserver
turn_secret = "ADD SECRET HERE"
# If you have your TURN server configured to use a username and password
# you can provide this information too. In this case, comment out `turn_secret` above!
#turn_username = ""
#turn_password = ""
```
## Apply settings
Restart Conduit.
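If you followed the systemd setup from the generic deployment guide, that is a single command (a sketch assuming the service is named `conduit`):

```bash
sudo systemctl restart conduit
```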


@ -35,25 +35,6 @@ name = "lychee"
group = "versions"
script = "lychee --version"
[[task]]
name = "markdownlint"
group = "versions"
script = "markdownlint --version"
[[task]]
name = "lychee"
group = "lints"
script = "lychee --offline ."
[[task]]
name = "markdownlint"
group = "lints"
# don't just use 'markdownlint .' because it will lint files that are ignored by
# git
script = """
git ls-files --cached --others --exclude-standard '*.md' | xargs markdownlint
"""
[[task]]
name = "cargo-fmt"
group = "lints"
@ -71,25 +52,17 @@ RUSTDOCFLAGS="-D warnings" cargo doc \
"""
[[task]]
name = "cargo-clippy/none"
group = "lints"
script = """
cargo clippy \
--workspace \
--all-targets \
--no-default-features \
--color=always \
-- \
-D warnings
"""
[[task]]
name = "cargo-clippy/default"
name = "cargo-clippy"
group = "lints"
script = "cargo clippy --workspace --all-targets --color=always -- -D warnings"
[[task]]
name = "cargo/default"
name = "lychee"
group = "lints"
script = "lychee --offline docs"
[[task]]
name = "cargo"
group = "tests"
script = """
cargo test \

flake.lock generated

@ -4,17 +4,16 @@
"inputs": {
"crane": "crane",
"flake-compat": "flake-compat",
"flake-parts": "flake-parts",
"nix-github-actions": "nix-github-actions",
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs",
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1752217044,
"narHash": "sha256-5TomR72rn4q+5poQcN6EnanxeXKqJSqWVAoDAFN0lUc=",
"lastModified": 1707922053,
"narHash": "sha256-wSZjK+rOXn+UQiP1NbdNn5/UW6UcBxjvlqr2wh++MbM=",
"owner": "zhaofengli",
"repo": "attic",
"rev": "24fad0622fc9404c69e83bab7738359c5be4988e",
"rev": "6eabc3f02fae3683bffab483e614bebfcd476b21",
"type": "github"
},
"original": {
@ -25,12 +24,18 @@
}
},
"crane": {
"inputs": {
"nixpkgs": [
"attic",
"nixpkgs"
]
},
"locked": {
"lastModified": 1751562746,
"narHash": "sha256-smpugNIkmDeicNz301Ll1bD7nFOty97T79m4GUMUczA=",
"lastModified": 1702918879,
"narHash": "sha256-tWJqzajIvYcaRWxn+cLUB9L9Pv4dQ3Bfit/YjU5ze3g=",
"owner": "ipetkov",
"repo": "crane",
"rev": "aed2020fd3dc26e1e857d4107a5a67a33ab6c1fd",
"rev": "7195c00c272fdd92fc74e7d5a0a2844b9fadb2fb",
"type": "github"
},
"original": {
@ -40,12 +45,17 @@
}
},
"crane_2": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1752946753,
"narHash": "sha256-g5uP3jIj+STUcfTJDKYopxnSijs2agRg13H0SGL5iE4=",
"lastModified": 1713721181,
"narHash": "sha256-Vz1KRVTzU3ClBfyhOj8gOehZk21q58T1YsXC30V23PU=",
"owner": "ipetkov",
"repo": "crane",
"rev": "544d09fecc8c2338542c57f3f742f1a0c8c71e13",
"rev": "55f4939ac59ff8f89c6a4029730a2d49ea09105f",
"type": "github"
},
"original": {
@ -63,16 +73,15 @@
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1752993983,
"narHash": "sha256-3YKCySMNhFDdHbFiRS4QbEwk0U5l42NMD1scDtniESY=",
"lastModified": 1709619709,
"narHash": "sha256-l6EPVJfwfelWST7qWQeP6t/TDK3HHv5uUB1b2vw4mOQ=",
"owner": "nix-community",
"repo": "fenix",
"rev": "62105e0745d7450976b26dbd1497b8cbe15eb9ff",
"rev": "c8943ea9e98d41325ff57d4ec14736d330b321b2",
"type": "github"
},
"original": {
"owner": "nix-community",
"ref": "main",
"repo": "fenix",
"type": "github"
}
@ -80,11 +89,11 @@
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1747046372,
"narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=",
"lastModified": 1673956053,
"narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885",
"rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
"type": "github"
},
"original": {
@ -96,104 +105,74 @@
"flake-compat_2": {
"flake": false,
"locked": {
"lastModified": 1747046372,
"narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=",
"lastModified": 1696426674,
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885",
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
"type": "github"
},
"original": {
"owner": "edolstra",
"ref": "master",
"repo": "flake-compat",
"type": "github"
}
},
"flake-parts": {
"inputs": {
"nixpkgs-lib": [
"attic",
"nixpkgs"
]
},
"locked": {
"lastModified": 1751413152,
"narHash": "sha256-Tyw1RjYEsp5scoigs1384gIg6e0GoBVjms4aXFfRssQ=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "77826244401ea9de6e3bac47c2db46005e1f30b5",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"lastModified": 1667395993,
"narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1709126324,
"narHash": "sha256-q6EQdSeUZOG26WelxqkmR7kArjgWCdw5sfJVHPH/7j8=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "d465f4819400de7c8d874d50b982301f28a84605",
"type": "github"
},
"original": {
"owner": "numtide",
"ref": "main",
"repo": "flake-utils",
"type": "github"
}
},
"nix-filter": {
"locked": {
"lastModified": 1731533336,
"narHash": "sha256-oRam5PS1vcrr5UPgALW0eo1m/5/pls27Z/pabHNy2Ms=",
"lastModified": 1705332318,
"narHash": "sha256-kcw1yFeJe9N4PjQji9ZeX47jg0p9A0DuU4djKvg1a7I=",
"owner": "numtide",
"repo": "nix-filter",
"rev": "f7653272fd234696ae94229839a99b73c9ab7de0",
"rev": "3449dc925982ad46246cfc36469baf66e1b64f17",
"type": "github"
},
"original": {
"owner": "numtide",
"ref": "main",
"repo": "nix-filter",
"type": "github"
}
},
"nix-github-actions": {
"inputs": {
"nixpkgs": [
"attic",
"nixpkgs"
]
},
"locked": {
"lastModified": 1737420293,
"narHash": "sha256-F1G5ifvqTpJq7fdkT34e/Jy9VCyzd5XfJ9TO8fHhJWE=",
"owner": "nix-community",
"repo": "nix-github-actions",
"rev": "f4158fa080ef4503c8f4c820967d946c2af31ec9",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nix-github-actions",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1751949589,
"narHash": "sha256-mgFxAPLWw0Kq+C8P3dRrZrOYEQXOtKuYVlo9xvPntt8=",
"lastModified": 1702539185,
"narHash": "sha256-KnIRG5NMdLIpEkZTnN5zovNYc0hhXjAgv6pfd5Z4c7U=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "9b008d60392981ad674e04016d25619281550a9d",
"rev": "aa9d4729cbc99dabacb50e3994dcefb3ea0f7447",
"type": "github"
},
"original": {
@ -205,27 +184,27 @@
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1751741127,
"narHash": "sha256-t75Shs76NgxjZSgvvZZ9qOmz5zuBE8buUaYD28BMTxg=",
"lastModified": 1702780907,
"narHash": "sha256-blbrBBXjjZt6OKTcYX1jpe9SRof2P9ZYWPzq22tzXAA=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "29e290002bfff26af1db6f64d070698019460302",
"rev": "1e2e384c5b7c50dbf8e9c441a9e58d85f408b01f",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-25.05",
"ref": "nixos-23.11",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1752950548,
"narHash": "sha256-NS6BLD0lxOrnCiEOcvQCDVPXafX1/ek1dfJHX1nUIzc=",
"lastModified": 1709479366,
"narHash": "sha256-n6F0n8UV6lnTZbYPl1A9q1BS0p4hduAv1mGAP17CVd0=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "c87b95e25065c028d31a94f06a62927d18763fdf",
"rev": "b8697e57f10292a6165a20f03d2f42920dfaf973",
"type": "github"
},
"original": {
@ -235,43 +214,25 @@
"type": "github"
}
},
"rocksdb": {
"flake": false,
"locked": {
"lastModified": 1752084860,
"narHash": "sha256-mKh6zsmxsiUix4LX+npiytmKvLbo6WNA9y4Ns/EY+bE=",
"owner": "facebook",
"repo": "rocksdb",
"rev": "410c5623195ecbe4699b9b5a5f622c7325cec6fe",
"type": "github"
},
"original": {
"owner": "facebook",
"ref": "v10.4.2",
"repo": "rocksdb",
"type": "github"
}
},
"root": {
"inputs": {
"attic": "attic",
"crane": "crane_2",
"fenix": "fenix",
"flake-compat": "flake-compat_2",
"flake-utils": "flake-utils",
"flake-utils": "flake-utils_2",
"nix-filter": "nix-filter",
"nixpkgs": "nixpkgs_2",
"rocksdb": "rocksdb"
"nixpkgs": "nixpkgs_2"
}
},
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1752913824,
"narHash": "sha256-kRpDlijAr4p5VmcPSRw2mfhaBZ4cE3EDWzqLDIbASgA=",
"lastModified": 1709571018,
"narHash": "sha256-ISFrxHxE0J5g7lDAscbK88hwaT5uewvWoma9TlFmRzM=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "ed193af36937d2fd4bb14a815ec589875c5c7304",
"rev": "9f14343f9ee24f53f17492c5f9b653427e2ad15e",
"type": "github"
},
"original": {

flake.nix

@ -1,130 +1,312 @@
{
# Keep sorted
inputs = {
attic.url = "github:zhaofengli/attic?ref=main";
crane.url = "github:ipetkov/crane?ref=master";
fenix = { url = "github:nix-community/fenix?ref=main"; inputs.nixpkgs.follows = "nixpkgs"; };
flake-compat = { url = "github:edolstra/flake-compat?ref=master"; flake = false; };
flake-utils.url = "github:numtide/flake-utils?ref=main";
nix-filter.url = "github:numtide/nix-filter?ref=main";
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable";
rocksdb = { url = "github:facebook/rocksdb?ref=v10.4.2"; flake = false; };
flake-utils.url = "github:numtide/flake-utils";
nix-filter.url = "github:numtide/nix-filter";
flake-compat = {
url = "github:edolstra/flake-compat";
flake = false;
};
fenix = {
url = "github:nix-community/fenix";
inputs.nixpkgs.follows = "nixpkgs";
};
crane = {
url = "github:ipetkov/crane?ref=master";
inputs.nixpkgs.follows = "nixpkgs";
};
attic.url = "github:zhaofengli/attic?ref=main";
};
outputs = inputs:
outputs =
{ self
, nixpkgs
, flake-utils
, nix-filter
, fenix
, crane
, ...
}: flake-utils.lib.eachDefaultSystem (system:
let
rust-manifest = builtins.fetchurl {
# Keep version in sync with rust-toolchain.toml
url = "https://static.rust-lang.org/dist/channel-rust-1.88.0.toml";
sha256 = "sha256-Qxt8XAuaUR2OMdKbN4u8dBJOhSHxS+uS06Wl9+flVEk=";
pkgsHost = nixpkgs.legacyPackages.${system};
# Nix-accessible `Cargo.toml`
cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml);
# The Rust toolchain to use
toolchain = fenix.packages.${system}.fromToolchainFile {
file = ./rust-toolchain.toml;
# See also `rust-toolchain.toml`
sha256 = "sha256-SXRtAuO4IqNOQq+nLbrsDFbVk+3aVA8NNpSZsKlVH/8=";
};
# Keep sorted
mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: {
craneLib =
(inputs.crane.mkLib pkgs).overrideToolchain (_: self.toolchain);
builder = pkgs:
((crane.mkLib pkgs).overrideToolchain toolchain).buildPackage;
default = self.callPackage ./nix/pkgs/default {};
nativeBuildInputs = pkgs: [
# bindgen needs the build platform's libclang. Apparently due to
# "splicing weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't
# quite do the right thing here.
pkgs.pkgsBuildHost.rustPlatform.bindgenHook
];
inherit inputs;
# Return a new scope with overrides applied to the 'default' package
overrideDefaultPackage = args: self.overrideScope (final: prev: {
default = prev.default.override args;
});
rocksdb = self.callPackage ./nix/pkgs/rocksdb {
inherit (pkgs) rocksdb;
};
shell = self.callPackage ./nix/shell.nix {};
# The Rust toolchain to use
# Using fromManifestFile and parsing the toolchain file with importTOML
# instead of fromToolchainFile to avoid IFD
toolchain = let
toolchainFile = pkgs.lib.importTOML ./rust-toolchain.toml;
defaultProfileComponents = [
"rustc"
"cargo"
"rust-docs"
"rustfmt"
"clippy"
];
components = defaultProfileComponents ++
toolchainFile.toolchain.components;
targets = toolchainFile.toolchain.targets;
fenix = inputs.fenix.packages.${pkgs.stdenv.buildPlatform.system};
nativeToolchain = (fenix.fromManifestFile rust-manifest)
.withComponents components;
crossComponents = builtins.map
(target:
(fenix.targets.${target}.fromManifestFile rust-manifest)
.rust-std)
targets;
in
fenix.combine ([nativeToolchain] ++ crossComponents);
website-root = self.callPackage ./nix/pkgs/website-root {};
});
in
inputs.flake-utils.lib.eachDefaultSystem (system:
rocksdb' = pkgs:
let
pkgs = import inputs.nixpkgs {
inherit system;
# Some users find it useful to set this on their Nixpkgs instance and
# we want to support that use case, so we set it here too to help us
# test/ensure that this works.
config.allowAliases = false;
};
version = "9.1.0";
in
{
packages = {
default = (mkScope pkgs).default;
website-root = (mkScope pkgs).website-root;
}
//
builtins.listToAttrs
(builtins.concatLists
(builtins.map
(crossSystem:
let
binaryName = "static-${crossSystem}";
pkgsCrossStatic =
(import inputs.nixpkgs {
inherit system;
crossSystem = {
config = crossSystem;
};
pkgs.rocksdb.overrideAttrs (old: {
inherit version;
src = pkgs.fetchFromGitHub {
owner = "facebook";
repo = "rocksdb";
rev = "v${version}";
hash = "sha256-vRPyrXkXVVhP56n5FVYef8zbIsnnanQSpElmQLZ7mh8=";
};
});
# Some users find it useful to set this on their Nixpkgs
# instance and we want to support that use case, so we set
# it here too to help us test/ensure that this works.
config.allowAliases = false;
}).pkgsStatic;
in
[
# An output for a statically-linked binary
{
name = binaryName;
value = (mkScope pkgsCrossStatic).default;
}
]
env = pkgs: {
CONDUIT_VERSION_EXTRA = self.shortRev or self.dirtyShortRev;
ROCKSDB_INCLUDE_DIR = "${rocksdb' pkgs}/include";
ROCKSDB_LIB_DIR = "${rocksdb' pkgs}/lib";
}
// pkgs.lib.optionalAttrs pkgs.stdenv.hostPlatform.isStatic {
ROCKSDB_STATIC = "";
}
// {
CARGO_BUILD_RUSTFLAGS = let inherit (pkgs) lib stdenv; in
lib.concatStringsSep " " ([]
++ lib.optionals
# This disables PIE for static builds, which isn't great in terms
# of security. Unfortunately, my hand is forced because nixpkgs'
# `libstdc++.a` is built without `-fPIE`, which precludes us from
# leaving PIE enabled.
stdenv.hostPlatform.isStatic
["-C" "relocation-model=static"]
++ lib.optionals
(stdenv.buildPlatform.config != stdenv.hostPlatform.config)
["-l" "c"]
++ lib.optionals
# This check has to match the one [here][0]. We only need to set
# these flags when using a different linker. Don't ask me why,
# though, because I don't know. All I know is it breaks otherwise.
#
# [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L37-L40
(
# Nixpkgs doesn't check for x86_64 here but we do, because I
# observed a failure building statically for x86_64 without
# including it here. Linkers are weird.
(stdenv.hostPlatform.isAarch64 || stdenv.hostPlatform.isx86_64)
&& stdenv.hostPlatform.isStatic
&& !stdenv.isDarwin
&& !stdenv.cc.bintools.isLLVM
)
[
"x86_64-unknown-linux-musl"
"aarch64-unknown-linux-musl"
"-l"
"stdc++"
"-L"
"${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib"
]
);
}
# What follows is stolen from [here][0]. Its purpose is to properly
# configure compilers and linkers for various stages of the build, and
# even covers the case of build scripts that need native code compiled and
# run on the build platform (I think).
#
# [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L57-L80
// (
let
inherit (pkgs.rust.lib) envVars;
in
pkgs.lib.optionalAttrs
(pkgs.stdenv.targetPlatform.rust.rustcTarget
!= pkgs.stdenv.hostPlatform.rust.rustcTarget)
(
let
inherit (pkgs.stdenv.targetPlatform.rust) cargoEnvVarTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForTarget;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" =
envVars.linkerForTarget;
}
)
// (
let
inherit (pkgs.stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForHost;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForHost;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForHost;
CARGO_BUILD_TARGET = rustcTarget;
}
)
// (
let
inherit (pkgs.stdenv.buildPlatform.rust) cargoEnvVarTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForBuild;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForBuild;
HOST_CC = "${pkgs.pkgsBuildHost.stdenv.cc}/bin/cc";
HOST_CXX = "${pkgs.pkgsBuildHost.stdenv.cc}/bin/c++";
}
));
package = pkgs: builder pkgs {
src = nix-filter {
root = ./.;
include = [
"src"
"Cargo.toml"
"Cargo.lock"
];
};
# This is redundant with CI
doCheck = false;
env = env pkgs;
nativeBuildInputs = nativeBuildInputs pkgs;
meta.mainProgram = cargoToml.package.name;
};
mkOciImage = pkgs: package:
pkgs.dockerTools.buildImage {
name = package.pname;
tag = "next";
copyToRoot = [
pkgs.dockerTools.caCertificates
];
config = {
# Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT)
# are handled as expected
Entrypoint = [
"${pkgs.lib.getExe' pkgs.tini "tini"}"
"--"
];
Cmd = [
"${pkgs.lib.getExe package}"
];
};
};
in
{
packages = {
default = package pkgsHost;
oci-image = mkOciImage pkgsHost self.packages.${system}.default;
book =
let
package = self.packages.${system}.default;
in
pkgsHost.stdenv.mkDerivation {
pname = "${package.pname}-book";
version = package.version;
src = nix-filter {
root = ./.;
include = [
"book.toml"
"conduit-example.toml"
"README.md"
"debian/README.md"
"docs"
];
};
nativeBuildInputs = (with pkgsHost; [
mdbook
]);
buildPhase = ''
mdbook build
mv public $out
'';
};
}
//
builtins.listToAttrs
(builtins.concatLists
(builtins.map
(crossSystem:
let
binaryName = "static-${crossSystem}";
pkgsCrossStatic =
(import nixpkgs {
inherit system;
crossSystem = {
config = crossSystem;
};
}).pkgsStatic;
in
[
# An output for a statically-linked binary
{
name = binaryName;
value = package pkgsCrossStatic;
}
# An output for an OCI image based on that binary
{
name = "oci-image-${crossSystem}";
value = mkOciImage
pkgsCrossStatic
self.packages.${system}.${binaryName};
}
]
)
);
[
"x86_64-unknown-linux-musl"
"aarch64-unknown-linux-musl"
]
)
);
devShells.default = (mkScope pkgs).shell;
}
)
//
{
nixosModules.default = import ./nix/modules/default inputs;
};
devShells.default = pkgsHost.mkShell {
env = env pkgsHost // {
# Rust Analyzer needs to be able to find the path to default crate
# sources, and it can read this environment variable to do so. The
# `rust-src` component is required in order for this to work.
RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library";
};
# Development tools
nativeBuildInputs = nativeBuildInputs pkgsHost ++ [
# Always use nightly rustfmt because most of its options are unstable
#
# This needs to come before `toolchain` in this list, otherwise
# `$PATH` will have stable rustfmt instead.
fenix.packages.${system}.latest.rustfmt
toolchain
] ++ (with pkgsHost; [
engage
# Needed for producing Debian packages
cargo-deb
# Needed for Complement
go
olm
# Needed for our script for Complement
jq
# Needed for finding broken markdown links
lychee
# Useful for editing the book locally
mdbook
]);
};
});
}
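The `nixosModules.default` output above is the flake's main integration point for NixOS hosts. As a minimal sketch (the input URL is illustrative, not the project's canonical location), a downstream flake could consume it like so:

{
  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable";
    # Illustrative URL; point this wherever the flake is actually hosted
    grapevine.url = "git+https://example.invalid/grapevine?ref=main";
  };

  outputs = { nixpkgs, grapevine, ... }: {
    nixosConfigurations.example = nixpkgs.lib.nixosSystem {
      system = "x86_64-linux";
      modules = [
        grapevine.nixosModules.default
        # host-specific configuration here
      ];
    };
  };
}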


@ -1,154 +0,0 @@
inputs:
{ config
, lib
, pkgs
, ...
}:
let
inherit (lib) types;
cfg = config.services.grapevine;
configFile = format.generate "config.toml" cfg.settings;
validateConfig = file: pkgs.runCommand "grapevine-checked-config" {} ''
${lib.getExe cfg.package} check-config -sc ${lib.escapeShellArg file}
ln -s ${lib.escapeShellArg file} "$out"
'';
format = pkgs.formats.toml {};
in
{
options.services.grapevine = {
enable = lib.mkEnableOption "grapevine";
package = lib.mkPackageOption
inputs.self.packages.${pkgs.stdenv.hostPlatform.system}
"grapevine"
{
default = "default";
pkgsText = "inputs.grapevine.packages.\${pkgs.stdenv.hostPlatform.system}";
};
settings = lib.mkOption {
type = types.submodule {
freeformType = format.type;
options = {
conduit_compat = lib.mkOption {
type = types.bool;
description = ''
Whether to operate as a drop-in replacement for Conduit.
'';
default = false;
};
database = {
backend = lib.mkOption {
type = types.nonEmptyStr;
description = ''
The database backend to use.
'';
default = "rocksdb";
};
path = lib.mkOption {
type = types.nonEmptyStr;
readOnly = true;
description = ''
The path to store database files in.
Note that this is read-only because this module makes use of
systemd's `StateDirectory` option.
'';
default = if cfg.settings.conduit_compat
then "/var/lib/matrix-conduit/database"
else "/var/lib/grapevine/database";
};
};
media.backend = {
type = lib.mkOption {
type = types.nonEmptyStr;
readOnly = true;
description = ''
The media backend to use.
Note that this is read-only because `filesystem` is currently
the only valid option.
'';
default = "filesystem";
};
path = lib.mkOption {
type = types.nonEmptyStr;
readOnly = true;
description = ''
The path to store media files in.
Note that this is read-only because this module makes use of
systemd's `StateDirectory` option.
'';
default = if cfg.settings.conduit_compat
then "/var/lib/matrix-conduit/media"
else "/var/lib/grapevine/media";
};
};
listen = lib.mkOption {
type = types.listOf format.type;
description = ''
List of places to listen for incoming connections.
'';
default = [
{
type = "tcp";
address = "::1";
port = 6167;
}
];
};
};
};
default = {};
description = ''
The TOML configuration file is generated from this attribute set.
'';
};
};
config = lib.mkIf cfg.enable {
systemd.services.grapevine = {
description = "Grapevine (Matrix homeserver)";
wantedBy = [ "multi-user.target" ];
# Keep sorted
serviceConfig = {
DynamicUser = true;
ExecStart = "${lib.getExe cfg.package} serve --config ${validateConfig configFile}";
LockPersonality = true;
MemoryDenyWriteExecute = true;
PrivateDevices = true;
PrivateMounts = true;
PrivateUsers = true;
ProtectClock = true;
ProtectControlGroups = true;
ProtectHostname = true;
ProtectKernelLogs = true;
ProtectKernelModules = true;
ProtectKernelTunables = true;
Restart = "on-failure";
RestartSec = 10;
RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
RestrictNamespaces = true;
RestrictRealtime = true;
StartLimitBurst = 5;
StateDirectory = if cfg.settings.conduit_compat
then "matrix-conduit"
else "grapevine";
StateDirectoryMode = "0700";
SystemCallArchitectures = "native";
SystemCallFilter = [ "@system-service" "~@privileged" ];
TimeoutStartSec = "infinity";
Type = "notify";
UMask = "077";
User = if cfg.settings.conduit_compat
then "conduit"
else "grapevine";
};
};
};
}
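Put together, the options above admit a configuration along these lines (a minimal sketch; the values shown are the module's own defaults, and any extra keys under `settings` pass through to the generated config.toml via the freeform TOML type):

{
  services.grapevine = {
    enable = true;
    settings = {
      conduit_compat = false;
      listen = [
        { type = "tcp"; address = "::1"; port = 6167; }
      ];
    };
  };
}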


@ -1,95 +0,0 @@
# Keep sorted
{ lib
, pkgsBuildHost
, rust
, snappy
, stdenv
}:
lib.optionalAttrs stdenv.hostPlatform.isStatic {
ROCKSDB_STATIC = "";
}
//
{
CARGO_BUILD_RUSTFLAGS =
lib.concatStringsSep
" "
([]
# This disables PIE for static builds, which isn't great in terms of
# security. Unfortunately, my hand is forced because nixpkgs'
# `libstdc++.a` is built without `-fPIE`, which precludes us from
# leaving PIE enabled.
++ lib.optionals
stdenv.hostPlatform.isStatic
[ "-C" "relocation-model=static" ]
# I'm not sure why any of this is necessary but it is so *shrug*
++ lib.optionals
(stdenv.buildPlatform.config != stdenv.hostPlatform.config)
[
"-l"
"c"
"-l"
"stdc++"
"-L"
"${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib"
"-l"
"snappy"
"-L"
"${snappy}/lib"
]
);
}
# What follows is stolen from [here][0]. Its purpose is to properly configure
# compilers and linkers for various stages of the build, and even covers the
# case of build scripts that need native code compiled and run on the build
# platform (I think).
#
# [0]: https://github.com/NixOS/nixpkgs/blob/2768c7d042a37de65bb1b5b3268fc987e534c49d/pkgs/build-support/rust/lib/default.nix#L45-L68
//
(
let
inherit (rust.lib) envVars;
in
lib.optionalAttrs
(stdenv.targetPlatform.rust.rustcTarget
!= stdenv.hostPlatform.rust.rustcTarget)
(
let
inherit (stdenv.targetPlatform.rust) cargoEnvVarTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForTarget;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.ccForTarget;
}
)
//
(
let
inherit (stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForHost;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForHost;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.ccForHost;
CARGO_BUILD_TARGET = rustcTarget;
}
)
//
(
let
inherit (stdenv.buildPlatform.rust) cargoEnvVarTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForBuild;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.ccForBuild;
HOST_CC = "${pkgsBuildHost.stdenv.cc}/bin/cc";
HOST_CXX = "${pkgsBuildHost.stdenv.cc}/bin/c++";
}
)
)


@ -1,109 +0,0 @@
# Dependencies (keep sorted)
{ craneLib
, inputs
, jq
, lib
, pkgsBuildHost
, rocksdb
, rust
, snappy
, stdenv
# Options (keep sorted)
, default-features ? true
, features ? []
, profile ? "release"
, version-extra ? inputs.self.shortRev
or inputs.self.dirtyShortRev
or null,
}:
let
cargoManifest = lib.importTOML "${inputs.self}/Cargo.toml";
buildDepsOnlyEnv =
{
NIX_OUTPATH_USED_AS_RANDOM_SEED = "randomseed";
CARGO_PROFILE = profile;
ROCKSDB_INCLUDE_DIR = "${rocksdb}/include";
ROCKSDB_LIB_DIR = "${rocksdb}/lib";
}
//
(import ./cross-compilation-env.nix {
# Keep sorted
inherit
lib
pkgsBuildHost
rust
snappy
stdenv;
});
buildPackageEnv =
(lib.optionalAttrs (version-extra != null) {
GRAPEVINE_VERSION_EXTRA = version-extra;
})
// buildDepsOnlyEnv;
commonAttrs = {
# Reading from cargoManifest directly instead of using
# createNameFromCargoToml to avoid IFD
pname = cargoManifest.package.name;
version = cargoManifest.package.version;
src = let filter = inputs.nix-filter.lib; in filter {
root = inputs.self;
# Keep sorted
include = [
".cargo/config.toml"
"Cargo.lock"
"Cargo.toml"
"src"
];
};
dontStrip = profile != "release";
nativeBuildInputs = [
# bindgen needs the build platform's libclang. Apparently due to "splicing
# weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't quite do the
# right thing here.
pkgsBuildHost.rustPlatform.bindgenHook
# We don't actually depend on `jq`, but crane's `buildPackage` does while
# its `buildDepsOnly` doesn't. This causes those two derivations to have
# differing values for `NIX_CFLAGS_COMPILE`, which contributes to spurious
# rebuilds of bindgen and its dependents.
jq
];
# Opt out of crane's automagic cross support
doIncludeCrossToolchainEnv = false;
# This is redundant with CI
doCheck = false;
};
in
craneLib.buildPackage (commonAttrs // {
cargoArtifacts = craneLib.buildDepsOnly (commonAttrs // {
env = buildDepsOnlyEnv;
});
cargoExtraArgs = "--locked "
+ lib.optionalString
(!default-features)
"--no-default-features "
+ lib.optionalString
(features != [])
"--features " + (builtins.concatStringsSep "," features);
env = buildPackageEnv;
passthru = {
env = buildPackageEnv;
};
meta.mainProgram = commonAttrs.pname;
})


@ -1,40 +0,0 @@
# Dependencies (keep sorted)
{ inputs
, lib
, rocksdb
}:
let
rocksdb' = (rocksdb.overrideAttrs (old: {
src = inputs.rocksdb;
version = lib.removePrefix
"v"
(builtins.fromJSON
(builtins.readFile ../../../flake.lock)
).nodes.rocksdb.original.ref;
})).override {
enableLiburing = false;
};
cVersion = rocksdb'.version;
rustVersion = builtins.elemAt
(lib.splitString
"+"
(lib.findSingle
(x: x.name == "rust-librocksdb-sys")
(builtins.throw "Multiple rust-librocksdb-sys versions in Cargo.lock")
(builtins.throw "No rust-librocksdb-sys versions in Cargo.lock")
(builtins.fromTOML (builtins.readFile ../../../Cargo.lock)).package
).version
)
1;
in
if cVersion == rustVersion
then rocksdb'
else builtins.throw
(builtins.concatStringsSep " " [
"C version (${cVersion}) and Rust version (${rustVersion}) of RocksDB do"
"not match"
])
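This check reads the pinned ref out of flake.lock and compares it against the `+`-suffixed part of the `rust-librocksdb-sys` version in Cargo.lock, so bumping RocksDB means updating both pins in lockstep. Sketch of the flake.nix side (matching the input shown earlier in this diff; the Cargo.lock side moves when the rust-rocksdb dependency is upgraded):

# flake.nix input; the ref must agree with the A.B.C suffix in
# rust-librocksdb-sys' "x.y.z+A.B.C" version from Cargo.lock
rocksdb = { url = "github:facebook/rocksdb?ref=v10.4.2"; flake = false; };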


@ -1,28 +0,0 @@
# Keep sorted
{ inputs
, lib
, mdbook
, stdenv
}:
stdenv.mkDerivation {
name = "website-root";
src = let filter = inputs.nix-filter.lib; in filter {
root = inputs.self;
# Keep sorted
include = [
"book"
"book.toml"
];
};
buildPhase = ''
${lib.getExe mdbook} build
'';
installPhase = ''
mv target/book $out
'';
}


@ -1,55 +0,0 @@
# Keep sorted
{ cargo-insta
, default
, engage
, findutils
, inputs
, jq
, lychee
, markdownlint-cli
, mdbook
, mkShell
, ripgrep
, stdenv
, toolchain
}:
mkShell {
env = default.env // {
# Rust Analyzer needs to be able to find the path to default crate
# sources, and it can read this environment variable to do so. The
# `rust-src` component is required in order for this to work.
RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library";
};
# Development tools
nativeBuildInputs = [
# Always use nightly rustfmt because most of its options are unstable
#
# This needs to come before `toolchain` in this list, otherwise
# `$PATH` will have stable rustfmt instead.
inputs.fenix.packages.${stdenv.buildPlatform.system}.latest.rustfmt
# Keep sorted
cargo-insta
engage
findutils
jq
lychee
markdownlint-cli
mdbook
ripgrep
toolchain
]
++
default.nativeBuildInputs
++
default.propagatedBuildInputs
++
default.buildInputs;
shellHook = ''
# Workaround for <https://github.com/NixOS/nix/issues/8355>
unset TMPDIR
'';
}
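Because the shell is built inside the same scope as `default` (see `mkScope` in flake.nix above), the scope's `overrideDefaultPackage` helper also rewires the devshell. A hypothetical sketch, using the option names from nix/pkgs/default:

# A scope whose `default` (and therefore whose `shell` env) is built
# with the "dev" cargo profile instead of "release"
((mkScope pkgs).overrideDefaultPackage { profile = "dev"; }).shell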


@ -2,6 +2,7 @@
#
# Other files that need upkeep when this changes:
#
# * `.gitlab-ci.yml`
# * `Cargo.toml`
# * `flake.nix`
#
@ -9,7 +10,7 @@
# If you're having trouble making the relevant changes, bug a maintainer.
[toolchain]
channel = "1.88.0"
channel = "1.75.0"
components = [
# For rust-analyzer
"rust-src",
@ -17,6 +18,5 @@ components = [
targets = [
"x86_64-unknown-linux-gnu",
"x86_64-unknown-linux-musl",
"aarch64-unknown-linux-gnu",
"aarch64-unknown-linux-musl",
]


@ -1,18 +1,2 @@
edition = "2021"
condense_wildcard_suffixes = true
error_on_line_overflow = true
format_code_in_doc_comments = true
format_macro_bodies = true
format_macro_matchers = true
format_strings = true
group_imports = "StdExternalCrate"
hex_literal_case = "Upper"
imports_granularity = "Crate"
max_width = 80
newline_style = "Unix"
reorder_impl_items = true
use_field_init_shorthand = true
use_small_heuristics = "Off"
use_try_shorthand = true
wrap_comments = true
unstable_features = true
imports_granularity="Crate"


@ -1,5 +0,0 @@
pub(crate) mod appservice_server;
pub(crate) mod client_server;
pub(crate) mod ruma_wrapper;
pub(crate) mod server_server;
pub(crate) mod well_known;


@ -1,28 +1,27 @@
use std::{fmt::Debug, mem, time::Duration};
use crate::{services, utils, Error, Result};
use bytes::BytesMut;
use ruma::api::{
appservice::Registration, IncomingResponse, MatrixVersion, OutgoingRequest,
SendAccessToken,
appservice::Registration, IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken,
};
use std::{fmt::Debug, mem, time::Duration};
use tracing::warn;
use crate::{services, utils, Error, Result};
/// Sends a request to an appservice
///
/// Only returns None if there is no url specified in the appservice
/// registration file
/// Only returns None if there is no url specified in the appservice registration file
#[tracing::instrument(skip(request))]
pub(crate) async fn send_request<T>(
pub(crate) async fn send_request<T: OutgoingRequest>(
registration: Registration,
request: T,
) -> Result<Option<T::IncomingResponse>>
where
T: OutgoingRequest + Debug,
T: Debug,
{
let Some(destination) = registration.url else {
return Ok(None);
let destination = match registration.url {
Some(url) => url,
None => {
return Ok(None);
}
};
let hs_token = registration.hs_token.as_str();
@ -34,7 +33,7 @@ where
&[MatrixVersion::V1_0],
)
.unwrap()
.map(BytesMut::freeze);
.map(|body| body.freeze());
let mut parts = http_request.uri().clone().into_parts();
let old_path_and_query = parts.path_and_query.unwrap().as_str().to_owned();
@ -45,36 +44,38 @@ where
};
parts.path_and_query = Some(
format!("{old_path_and_query}{symbol}access_token={hs_token}")
(old_path_and_query + symbol + "access_token=" + hs_token)
.parse()
.unwrap(),
);
*http_request.uri_mut() =
parts.try_into().expect("our manipulation is always valid");
*http_request.uri_mut() = parts.try_into().expect("our manipulation is always valid");
let mut reqwest_request = reqwest::Request::try_from(http_request)?;
*reqwest_request.timeout_mut() = Some(Duration::from_secs(30));
let url = reqwest_request.url().clone();
let mut response = services()
let mut response = match services()
.globals
.default_client()
.execute(reqwest_request)
.await
.inspect_err(|error| {
{
Ok(r) => r,
Err(e) => {
warn!(
%error,
appservice = registration.id,
%destination,
"Could not send request to appservice",
"Could not send request to appservice {:?} at {}: {}",
registration.id, destination, e
);
})?;
return Err(e.into());
}
};
// reqwest::Response -> http::Response conversion
let status = response.status();
let mut http_response_builder =
http::Response::builder().status(status).version(response.version());
let mut http_response_builder = http::Response::builder()
.status(status)
.version(response.version());
mem::swap(
response.headers_mut(),
http_response_builder
@ -82,22 +83,18 @@ where
.expect("http::response::Builder is usable"),
);
// TODO: handle timeout
let body = response.bytes().await.unwrap_or_else(|error| {
warn!(%error, "Server error");
let body = response.bytes().await.unwrap_or_else(|e| {
warn!("server error: {}", e);
Vec::new().into()
});
}); // TODO: handle timeout
if status != 200 {
warn!(
appservice = %destination,
%status,
%url,
body = %utils::dbg_truncate_str(
String::from_utf8_lossy(&body).as_ref(),
100,
),
"Appservice returned bad response",
"Appservice returned bad response {} {}\n{}\n{:?}",
destination,
status,
url,
utils::string_from_bytes(&body)
);
}
@ -107,12 +104,10 @@ where
.expect("reqwest body is valid http body"),
);
response.map(Some).map_err(|error| {
response.map(Some).map_err(|_| {
warn!(
%error,
appservice = %destination,
%url,
"Appservice returned invalid response bytes",
"Appservice returned invalid response bytes {}\n{}",
destination, url
);
Error::BadServerResponse("Server returned bad response.")
})


@ -1,72 +0,0 @@
mod account;
mod alias;
mod backup;
mod capabilities;
mod config;
mod context;
mod device;
mod directory;
mod filter;
mod keys;
mod media;
mod membership;
mod message;
mod profile;
mod push;
mod read_marker;
mod redact;
mod relations;
mod report;
mod room;
mod search;
mod session;
mod space;
mod state;
mod sync;
mod tag;
mod thirdparty;
mod threads;
mod to_device;
mod typing;
mod unversioned;
mod user_directory;
mod voip;
pub(crate) use account::*;
pub(crate) use alias::*;
pub(crate) use backup::*;
pub(crate) use capabilities::*;
pub(crate) use config::*;
pub(crate) use context::*;
pub(crate) use device::*;
pub(crate) use directory::*;
pub(crate) use filter::*;
pub(crate) use keys::*;
pub(crate) use media::*;
pub(crate) use membership::*;
pub(crate) use message::*;
pub(crate) use profile::*;
pub(crate) use push::*;
pub(crate) use read_marker::*;
pub(crate) use redact::*;
pub(crate) use relations::*;
pub(crate) use report::*;
pub(crate) use room::*;
pub(crate) use search::*;
pub(crate) use session::*;
pub(crate) use space::*;
pub(crate) use state::*;
pub(crate) use sync::*;
pub(crate) use tag::*;
pub(crate) use thirdparty::*;
pub(crate) use threads::*;
pub(crate) use to_device::*;
pub(crate) use typing::*;
pub(crate) use unversioned::*;
pub(crate) use user_directory::*;
pub(crate) use voip::*;
pub(crate) const DEVICE_ID_LENGTH: usize = 10;
pub(crate) const TOKEN_LENGTH: usize = 32;
pub(crate) const SESSION_ID_LENGTH: usize = 32;
pub(crate) const AUTO_GEN_PASSWORD_LENGTH: usize = 15;


@ -1,25 +1,22 @@
use register::RegistrationKind;
use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH};
use crate::{api::client_server, services, utils, Error, Result, Ruma};
use ruma::{
api::client::{
account::{
change_password, deactivate, get_3pids, get_username_availability,
register::{self, LoginType},
request_3pid_management_token_via_email,
request_3pid_management_token_via_msisdn, whoami,
ThirdPartyIdRemovalStatus,
request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn,
whoami, ThirdPartyIdRemovalStatus,
},
error::ErrorKind,
uiaa::{AuthFlow, AuthType, UiaaInfo},
},
events::room::message::RoomMessageEventContent,
push,
serde::Raw,
UserId,
events::{room::message::RoomMessageEventContent, GlobalAccountDataEventType},
push, UserId,
};
use tracing::{info, warn};
use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH};
use crate::{api::client_server, services, utils, Ar, Error, Ra, Result};
use register::RegistrationKind;
const RANDOM_USER_ID_LENGTH: usize = 10;
@ -32,11 +29,10 @@ const RANDOM_USER_ID_LENGTH: usize = 10;
/// - The server name of the user id matches this server
/// - No user or appservice on this server already claimed this username
///
/// Note: This will not reserve the username, so the username might become
/// invalid when trying to register
pub(crate) async fn get_register_available_route(
body: Ar<get_username_availability::v3::Request>,
) -> Result<Ra<get_username_availability::v3::Response>> {
/// Note: This will not reserve the username, so the username might become invalid when trying to register
pub async fn get_register_available_route(
body: Ruma<get_username_availability::v3::Request>,
) -> Result<get_username_availability::v3::Response> {
// Validate user id
let user_id = UserId::parse_with_server_name(
body.username.to_lowercase(),
@ -44,8 +40,7 @@ pub(crate) async fn get_register_available_route(
)
.ok()
.filter(|user_id| {
!user_id.is_historical()
&& user_id.server_name() == services().globals.server_name()
!user_id.is_historical() && user_id.server_name() == services().globals.server_name()
})
.ok_or(Error::BadRequest(
ErrorKind::InvalidUsername,
@ -63,37 +58,26 @@ pub(crate) async fn get_register_available_route(
// TODO add check for appservice namespaces
// If no if check is true we have an username that's available to be used.
Ok(Ra(get_username_availability::v3::Response {
available: true,
}))
Ok(get_username_availability::v3::Response { available: true })
}
/// # `POST /_matrix/client/r0/register`
///
/// Register an account on this homeserver.
///
/// You can use [`GET
/// /_matrix/client/r0/register/available`](get_register_available_route)
/// You can use [`GET /_matrix/client/r0/register/available`](fn.get_register_available_route.html)
/// to check if the user id is valid and available.
///
/// - Only works if registration is enabled
/// - If type is guest: ignores all parameters except
/// `initial_device_display_name`
/// - If type is guest: ignores all parameters except initial_device_display_name
/// - If sender is not appservice: Requires UIAA (but we only use a dummy stage)
/// - If type is not guest and no username is given: Always fails after UIAA
/// check
/// - If type is not guest and no username is given: Always fails after UIAA check
/// - Creates a new account and populates it with default account data
/// - If `inhibit_login` is false: Creates a device and returns `device_id` and
/// `access_token`
#[allow(clippy::too_many_lines)]
pub(crate) async fn register_route(
body: Ar<register::v3::Request>,
) -> Result<Ra<register::v3::Response>> {
if !services().globals.allow_registration()
&& body.appservice_info.is_none()
{
/// - If `inhibit_login` is false: Creates a device and returns device id and access_token
pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<register::v3::Response> {
if !services().globals.allow_registration() && body.appservice_info.is_none() {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"Registration has been disabled.",
));
}
@ -136,7 +120,7 @@ pub(crate) async fn register_route(
};
if body.body.login_type == Some(LoginType::ApplicationService) {
if let Some(info) = &body.appservice_info {
if let Some(ref info) = body.appservice_info {
if !info.is_user_match(&user_id) {
return Err(Error::BadRequest(
ErrorKind::Exclusive,
@ -165,20 +149,19 @@ pub(crate) async fn register_route(
stages: vec![AuthType::RegistrationToken],
}],
completed: Vec::new(),
params: Some(Box::default()),
params: Default::default(),
session: None,
auth_error: None,
};
body.appservice_info.is_some()
} else {
// No registration token necessary, but clients must still go through
// the flow
// No registration token necessary, but clients must still go through the flow
uiaainfo = UiaaInfo {
flows: vec![AuthFlow {
stages: vec![AuthType::Dummy],
}],
completed: Vec::new(),
params: Some(Box::default()),
params: Default::default(),
session: None,
auth_error: None,
};
@ -188,32 +171,26 @@ pub(crate) async fn register_route(
if !skip_auth {
if let Some(auth) = &body.auth {
let (worked, uiaainfo) = services().uiaa.try_auth(
&UserId::parse_with_server_name(
"",
services().globals.server_name(),
)
.expect("we know this is valid"),
&UserId::parse_with_server_name("", services().globals.server_name())
.expect("we know this is valid"),
"".into(),
auth,
&uiaainfo,
)?;
if !worked {
return Err(Error::Uiaa(Box::new(uiaainfo)));
return Err(Error::Uiaa(uiaainfo));
}
// Success!
} else if let Some(json) = body.json_body {
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
services().uiaa.create(
&UserId::parse_with_server_name(
"",
services().globals.server_name(),
)
.expect("we know this is valid"),
&UserId::parse_with_server_name("", services().globals.server_name())
.expect("we know this is valid"),
"".into(),
&uiaainfo,
&json,
)?;
return Err(Error::Uiaa(Box::new(uiaainfo)));
return Err(Error::Uiaa(uiaainfo));
} else {
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
}
@ -229,28 +206,39 @@ pub(crate) async fn register_route(
services().users.create(&user_id, password)?;
// Default to pretty displayname
let displayname = user_id.localpart().to_owned();
let mut displayname = user_id.localpart().to_owned();
services().users.set_displayname(&user_id, Some(displayname.clone()))?;
// If enabled append lightning bolt to display name (default true)
if services().globals.enable_lightning_bolt() {
displayname.push_str(" ⚡️");
}
services()
.users
.set_displayname(&user_id, Some(displayname.clone()))?;
// Initial account data
services().account_data.update_global(
services().account_data.update(
None,
&user_id,
&Raw::new(&ruma::events::push_rules::PushRulesEventContent {
global: push::Ruleset::server_default(&user_id),
GlobalAccountDataEventType::PushRules.to_string().into(),
&serde_json::to_value(ruma::events::push_rules::PushRulesEvent {
content: ruma::events::push_rules::PushRulesEventContent {
global: push::Ruleset::server_default(&user_id),
},
})
.expect("constructed event should be valid"),
.expect("to json always works"),
)?;
// Inhibit login does not work for guests
if !is_guest && body.inhibit_login {
return Ok(Ra(register::v3::Response {
return Ok(register::v3::Response {
access_token: None,
user_id,
device_id: None,
refresh_token: None,
expires_in: None,
}));
});
}
// Generate new device id if the user didn't specify one
@ -272,37 +260,42 @@ pub(crate) async fn register_route(
body.initial_device_display_name.clone(),
)?;
info!(%user_id, "New user registered on this server");
info!("New user {} registered on this server.", user_id);
if body.appservice_info.is_none() && !is_guest {
services().admin.send_message(RoomMessageEventContent::notice_plain(
format!("New user {user_id} registered on this server."),
));
services()
.admin
.send_message(RoomMessageEventContent::notice_plain(format!(
"New user {user_id} registered on this server."
)));
}
// If this is the first real user, grant them admin privileges
// Note: the server user, @grapevine:servername, is generated first
// Note: the server user, @conduit:servername, is generated first
if !is_guest {
if let Some(admin_room) = services().admin.get_admin_room()? {
if services().rooms.state_cache.room_joined_count(&admin_room)?
if services()
.rooms
.state_cache
.room_joined_count(&admin_room)?
== Some(1)
{
services().admin.make_user_admin(&user_id, displayname).await?;
services()
.admin
.make_user_admin(&user_id, displayname)
.await?;
warn!(
%user_id,
"Granting admin privileges to the first user",
);
warn!("Granting {} admin privileges as the first user", user_id);
}
}
}
Ok(Ra(register::v3::Response {
Ok(register::v3::Response {
access_token: Some(token),
user_id,
device_id: Some(device_id),
refresh_token: None,
expires_in: None,
}))
})
}
/// # `POST /_matrix/client/r0/account/password`
@ -311,91 +304,89 @@ pub(crate) async fn register_route(
///
/// - Requires UIAA to verify user password
/// - Changes the password of the sender user
/// - The password hash is calculated using argon2 with 32 character salt, the
/// plain password is not saved
/// - The password hash is calculated using argon2 with 32 character salt, the plain password is
/// not saved
///
/// If `logout_devices` is true it does the following for each device except the
/// sender device:
/// If logout_devices is true it does the following for each device except the sender device:
/// - Invalidates access token
/// - Deletes device metadata (device ID, device display name, last seen IP,
/// last seen timestamp)
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
/// - Forgets to-device events
/// - Triggers device list updates
pub(crate) async fn change_password_route(
body: Ar<change_password::v3::Request>,
) -> Result<Ra<change_password::v3::Response>> {
pub async fn change_password_route(
body: Ruma<change_password::v3::Request>,
) -> Result<change_password::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device =
body.sender_device.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
let mut uiaainfo = UiaaInfo {
flows: vec![AuthFlow {
stages: vec![AuthType::Password],
}],
completed: Vec::new(),
params: Some(Box::default()),
params: Default::default(),
session: None,
auth_error: None,
};
if let Some(auth) = &body.auth {
let (worked, uiaainfo) = services().uiaa.try_auth(
sender_user,
sender_device,
auth,
&uiaainfo,
)?;
let (worked, uiaainfo) =
services()
.uiaa
.try_auth(sender_user, sender_device, auth, &uiaainfo)?;
if !worked {
return Err(Error::Uiaa(Box::new(uiaainfo)));
return Err(Error::Uiaa(uiaainfo));
}
// Success!
} else if let Some(json) = body.json_body {
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
services().uiaa.create(sender_user, sender_device, &uiaainfo, &json)?;
return Err(Error::Uiaa(Box::new(uiaainfo)));
services()
.uiaa
.create(sender_user, sender_device, &uiaainfo, &json)?;
return Err(Error::Uiaa(uiaainfo));
} else {
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
}
services().users.set_password(sender_user, Some(&body.new_password))?;
services()
.users
.set_password(sender_user, Some(&body.new_password))?;
if body.logout_devices {
// Logout all devices except the current one
for id in services()
.users
.all_device_ids(sender_user)
.filter_map(Result::ok)
.filter_map(|id| id.ok())
.filter(|id| id != sender_device)
{
services().users.remove_device(sender_user, &id)?;
}
}
info!(user_id = %sender_user, "User changed their password");
services().admin.send_message(RoomMessageEventContent::notice_plain(
format!("User {sender_user} changed their password."),
));
info!("User {} changed their password.", sender_user);
services()
.admin
.send_message(RoomMessageEventContent::notice_plain(format!(
"User {sender_user} changed their password."
)));
Ok(Ra(change_password::v3::Response {}))
Ok(change_password::v3::Response {})
}
/// # `GET _matrix/client/r0/account/whoami`
///
/// Get `user_id` of the sender user.
/// Get user_id of the sender user.
///
/// Note: Also works for Application Services
pub(crate) async fn whoami_route(
body: Ar<whoami::v3::Request>,
) -> Result<Ra<whoami::v3::Response>> {
pub async fn whoami_route(body: Ruma<whoami::v3::Request>) -> Result<whoami::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let device_id = body.sender_device.clone();
let device_id = body.sender_device.as_ref().cloned();
Ok(Ra(whoami::v3::Response {
Ok(whoami::v3::Response {
user_id: sender_user.clone(),
device_id,
is_guest: services().users.is_deactivated(sender_user)?
&& body.appservice_info.is_none(),
}))
is_guest: services().users.is_deactivated(sender_user)? && body.appservice_info.is_none(),
})
}
/// # `POST /_matrix/client/r0/account/deactivate`
@ -404,43 +395,41 @@ pub(crate) async fn whoami_route(
///
/// - Leaves all rooms and rejects all invitations
/// - Invalidates all access tokens
/// - Deletes all device metadata (device id, device display name, last seen ip,
/// last seen ts)
/// - Deletes all device metadata (device id, device display name, last seen ip, last seen ts)
/// - Forgets all to-device events
/// - Triggers device list updates
/// - Removes ability to log in again
pub(crate) async fn deactivate_route(
body: Ar<deactivate::v3::Request>,
) -> Result<Ra<deactivate::v3::Response>> {
pub async fn deactivate_route(
body: Ruma<deactivate::v3::Request>,
) -> Result<deactivate::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device =
body.sender_device.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
let mut uiaainfo = UiaaInfo {
flows: vec![AuthFlow {
stages: vec![AuthType::Password],
}],
completed: Vec::new(),
params: Some(Box::default()),
params: Default::default(),
session: None,
auth_error: None,
};
if let Some(auth) = &body.auth {
let (worked, uiaainfo) = services().uiaa.try_auth(
sender_user,
sender_device,
auth,
&uiaainfo,
)?;
let (worked, uiaainfo) =
services()
.uiaa
.try_auth(sender_user, sender_device, auth, &uiaainfo)?;
if !worked {
return Err(Error::Uiaa(Box::new(uiaainfo)));
return Err(Error::Uiaa(uiaainfo));
}
// Success!
} else if let Some(json) = body.json_body {
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
services().uiaa.create(sender_user, sender_device, &uiaainfo, &json)?;
return Err(Error::Uiaa(Box::new(uiaainfo)));
services()
.uiaa
.create(sender_user, sender_device, &uiaainfo, &json)?;
return Err(Error::Uiaa(uiaainfo));
} else {
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
}
@ -451,14 +440,16 @@ pub(crate) async fn deactivate_route(
// Remove devices and mark account as deactivated
services().users.deactivate_account(sender_user)?;
info!(user_id = %sender_user, "User deactivated their account");
services().admin.send_message(RoomMessageEventContent::notice_plain(
format!("User {sender_user} deactivated their account."),
));
info!("User {} deactivated their account.", sender_user);
services()
.admin
.send_message(RoomMessageEventContent::notice_plain(format!(
"User {sender_user} deactivated their account."
)));
Ok(Ra(deactivate::v3::Response {
Ok(deactivate::v3::Response {
id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport,
}))
})
}
/// # `GET _matrix/client/v3/account/3pid`
@ -466,25 +457,22 @@ pub(crate) async fn deactivate_route(
/// Get a list of third party identifiers associated with this account.
///
/// - Currently always returns empty list
pub(crate) async fn third_party_route(
body: Ar<get_3pids::v3::Request>,
) -> Result<Ra<get_3pids::v3::Response>> {
let _sender_user =
body.sender_user.as_ref().expect("user is authenticated");
pub async fn third_party_route(
body: Ruma<get_3pids::v3::Request>,
) -> Result<get_3pids::v3::Response> {
let _sender_user = body.sender_user.as_ref().expect("user is authenticated");
Ok(Ra(get_3pids::v3::Response::new(Vec::new())))
Ok(get_3pids::v3::Response::new(Vec::new()))
}
/// # `POST /_matrix/client/v3/account/3pid/email/requestToken`
///
/// "This API should be used to request validation tokens when adding an email
/// address to an account"
/// "This API should be used to request validation tokens when adding an email address to an account"
///
/// - 403 signals that the homeserver does not allow the third party identifier
/// as a contact option.
pub(crate) async fn request_3pid_management_token_via_email_route(
_body: Ar<request_3pid_management_token_via_email::v3::Request>,
) -> Result<Ra<request_3pid_management_token_via_email::v3::Response>> {
/// - 403 signals that the homeserver does not allow the third party identifier as a contact option.
pub async fn request_3pid_management_token_via_email_route(
_body: Ruma<request_3pid_management_token_via_email::v3::Request>,
) -> Result<request_3pid_management_token_via_email::v3::Response> {
Err(Error::BadRequest(
ErrorKind::ThreepidDenied,
"Third party identifier is not allowed",
@ -493,14 +481,12 @@ pub(crate) async fn request_3pid_management_token_via_email_route(
/// # `POST /_matrix/client/v3/account/3pid/msisdn/requestToken`
///
/// "This API should be used to request validation tokens when adding an phone
/// number to an account"
/// "This API should be used to request validation tokens when adding an phone number to an account"
///
/// - 403 signals that the homeserver does not allow the third party identifier
/// as a contact option.
pub(crate) async fn request_3pid_management_token_via_msisdn_route(
_body: Ar<request_3pid_management_token_via_msisdn::v3::Request>,
) -> Result<Ra<request_3pid_management_token_via_msisdn::v3::Response>> {
/// - 403 signals that the homeserver does not allow the third party identifier as a contact option.
pub async fn request_3pid_management_token_via_msisdn_route(
_body: Ruma<request_3pid_management_token_via_msisdn::v3::Request>,
) -> Result<request_3pid_management_token_via_msisdn::v3::Response> {
Err(Error::BadRequest(
ErrorKind::ThreepidDenied,
"Third party identifier is not allowed",


@ -1,7 +1,8 @@
use crate::{services, Error, Result, Ruma};
use rand::seq::SliceRandom;
use ruma::{
api::{
appservice::query::query_room_alias,
appservice,
client::{
alias::{create_alias, delete_alias, get_alias},
error::ErrorKind,
@ -11,17 +12,12 @@ use ruma::{
OwnedRoomAliasId,
};
use crate::{services, Ar, Error, Ra, Result};
/// # `PUT /_matrix/client/r0/directory/room/{roomAlias}`
///
/// Creates a new room alias on this server.
pub(crate) async fn create_alias_route(
body: Ar<create_alias::v3::Request>,
) -> Result<Ra<create_alias::v3::Response>> {
let sender_user =
body.sender_user.as_deref().expect("user is authenticated");
pub async fn create_alias_route(
body: Ruma<create_alias::v3::Request>,
) -> Result<create_alias::v3::Response> {
if body.room_alias.server_name() != services().globals.server_name() {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
@ -29,31 +25,39 @@ pub(crate) async fn create_alias_route(
));
}
if let Some(info) = &body.appservice_info {
if let Some(ref info) = body.appservice_info {
if !info.aliases.is_match(body.room_alias.as_str()) {
return Err(Error::BadRequest(
ErrorKind::Exclusive,
"Room alias is not in namespace.",
));
}
} else if services().appservice.is_exclusive_alias(&body.room_alias).await {
} else if services()
.appservice
.is_exclusive_alias(&body.room_alias)
.await
{
return Err(Error::BadRequest(
ErrorKind::Exclusive,
"Room alias reserved by appservice.",
));
}
if services().rooms.alias.resolve_local_alias(&body.room_alias)?.is_some() {
if services()
.rooms
.alias
.resolve_local_alias(&body.room_alias)?
.is_some()
{
return Err(Error::Conflict("Alias already exists."));
}
services().rooms.alias.set_alias(
&body.room_alias,
&body.room_id,
sender_user,
)?;
services()
.rooms
.alias
.set_alias(&body.room_alias, &body.room_id)?;
Ok(Ra(create_alias::v3::Response::new()))
Ok(create_alias::v3::Response::new())
}
/// # `DELETE /_matrix/client/r0/directory/room/{roomAlias}`
@ -62,12 +66,9 @@ pub(crate) async fn create_alias_route(
///
/// - TODO: additional access control checks
/// - TODO: Update canonical alias event
pub(crate) async fn delete_alias_route(
body: Ar<delete_alias::v3::Request>,
) -> Result<Ra<delete_alias::v3::Response>> {
let sender_user =
body.sender_user.as_deref().expect("user is authenticated");
pub async fn delete_alias_route(
body: Ruma<delete_alias::v3::Request>,
) -> Result<delete_alias::v3::Response> {
if body.room_alias.server_name() != services().globals.server_name() {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
@ -75,25 +76,29 @@ pub(crate) async fn delete_alias_route(
));
}
if let Some(info) = &body.appservice_info {
if let Some(ref info) = body.appservice_info {
if !info.aliases.is_match(body.room_alias.as_str()) {
return Err(Error::BadRequest(
ErrorKind::Exclusive,
"Room alias is not in namespace.",
));
}
} else if services().appservice.is_exclusive_alias(&body.room_alias).await {
} else if services()
.appservice
.is_exclusive_alias(&body.room_alias)
.await
{
return Err(Error::BadRequest(
ErrorKind::Exclusive,
"Room alias reserved by appservice.",
));
}
services().rooms.alias.remove_alias(&body.room_alias, sender_user)?;
services().rooms.alias.remove_alias(&body.room_alias)?;
// TODO: update alt_aliases?
Ok(Ra(delete_alias::v3::Response::new()))
Ok(delete_alias::v3::Response::new())
}
/// # `GET /_matrix/client/r0/directory/room/{roomAlias}`
@ -101,14 +106,12 @@ pub(crate) async fn delete_alias_route(
/// Resolve an alias locally or over federation.
///
/// - TODO: Suggest more servers to join via
pub(crate) async fn get_alias_route(
body: Ar<get_alias::v3::Request>,
) -> Result<Ra<get_alias::v3::Response>> {
get_alias_helper(body.body.room_alias).await.map(Ra)
pub async fn get_alias_route(
body: Ruma<get_alias::v3::Request>,
) -> Result<get_alias::v3::Response> {
get_alias_helper(body.body.room_alias).await
}
// Can't use `services().rooms.alias.resolve_alias` because we also need the set
// of servers from the remote get_room_information request.
pub(crate) async fn get_alias_helper(
room_alias: OwnedRoomAliasId,
) -> Result<get_alias::v3::Response> {
@ -118,7 +121,7 @@ pub(crate) async fn get_alias_helper(
.send_federation_request(
room_alias.server_name(),
federation::query::get_room_information::v1::Request {
room_alias: room_alias.clone(),
room_alias: room_alias.to_owned(),
},
)
.await?;
@ -140,7 +143,7 @@ pub(crate) async fn get_alias_helper(
.sending
.send_appservice_request(
appservice.registration.clone(),
query_room_alias::v1::Request {
appservice::query::query_room_alias::v1::Request {
room_alias: room_alias.clone(),
},
)
@ -154,23 +157,23 @@ pub(crate) async fn get_alias_helper(
.alias
.resolve_local_alias(&room_alias)?
.ok_or_else(|| {
Error::bad_config(
"Appservice lied to us. Room does not \
exist.",
)
Error::bad_config("Appservice lied to us. Room does not exist.")
})?,
);
break;
}
}
}
}
};
let Some(room_id) = room_id else {
return Err(Error::BadRequest(
ErrorKind::NotFound,
"Room with alias not found.",
));
let room_id = match room_id {
Some(room_id) => room_id,
None => {
return Err(Error::BadRequest(
ErrorKind::NotFound,
"Room with alias not found.",
))
}
};
Ok(get_alias::v3::Response::new(


@ -1,54 +1,49 @@
use crate::{services, Error, Result, Ruma};
use ruma::api::client::{
backup::{
add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session,
create_backup_version, delete_backup_keys, delete_backup_keys_for_room,
delete_backup_keys_for_session, delete_backup_version, get_backup_info,
get_backup_keys, get_backup_keys_for_room, get_backup_keys_for_session,
get_latest_backup_info, update_backup_version,
delete_backup_keys_for_session, delete_backup_version, get_backup_info, get_backup_keys,
get_backup_keys_for_room, get_backup_keys_for_session, get_latest_backup_info,
update_backup_version,
},
error::ErrorKind,
};
use crate::{services, Ar, Error, Ra, Result};
/// # `POST /_matrix/client/r0/room_keys/version`
///
/// Creates a new backup.
pub(crate) async fn create_backup_version_route(
body: Ar<create_backup_version::v3::Request>,
) -> Result<Ra<create_backup_version::v3::Response>> {
pub async fn create_backup_version_route(
body: Ruma<create_backup_version::v3::Request>,
) -> Result<create_backup_version::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let version =
services().key_backups.create_backup(sender_user, &body.algorithm)?;
let version = services()
.key_backups
.create_backup(sender_user, &body.algorithm)?;
Ok(Ra(create_backup_version::v3::Response {
version,
}))
Ok(create_backup_version::v3::Response { version })
}
/// # `PUT /_matrix/client/r0/room_keys/version/{version}`
///
/// Update information about an existing backup. Only `auth_data` can be
/// modified.
pub(crate) async fn update_backup_version_route(
body: Ar<update_backup_version::v3::Request>,
) -> Result<Ra<update_backup_version::v3::Response>> {
/// Update information about an existing backup. Only `auth_data` can be modified.
pub async fn update_backup_version_route(
body: Ruma<update_backup_version::v3::Request>,
) -> Result<update_backup_version::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
services().key_backups.update_backup(
sender_user,
&body.version,
&body.algorithm,
)?;
services()
.key_backups
.update_backup(sender_user, &body.version, &body.algorithm)?;
Ok(Ra(update_backup_version::v3::Response {}))
Ok(update_backup_version::v3::Response {})
}
/// # `GET /_matrix/client/r0/room_keys/version`
///
/// Get information about the latest backup version.
pub(crate) async fn get_latest_backup_info_route(
body: Ar<get_latest_backup_info::v3::Request>,
) -> Result<Ra<get_latest_backup_info::v3::Response>> {
pub async fn get_latest_backup_info_route(
body: Ruma<get_latest_backup_info::v3::Request>,
) -> Result<get_latest_backup_info::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let (version, algorithm) = services()
@ -59,24 +54,20 @@ pub(crate) async fn get_latest_backup_info_route(
"Key backup does not exist.",
))?;
Ok(Ra(get_latest_backup_info::v3::Response {
Ok(get_latest_backup_info::v3::Response {
algorithm,
count: services()
.key_backups
.count_keys(sender_user, &version)?
.try_into()
.expect("count should fit in UInt"),
count: (services().key_backups.count_keys(sender_user, &version)? as u32).into(),
etag: services().key_backups.get_etag(sender_user, &version)?,
version,
}))
})
}
/// # `GET /_matrix/client/r0/room_keys/version`
///
/// Get information about an existing backup.
pub(crate) async fn get_backup_info_route(
body: Ar<get_backup_info::v3::Request>,
) -> Result<Ra<get_backup_info::v3::Response>> {
pub async fn get_backup_info_route(
body: Ruma<get_backup_info::v3::Request>,
) -> Result<get_backup_info::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let algorithm = services()
.key_backups
@ -86,45 +77,46 @@ pub(crate) async fn get_backup_info_route(
"Key backup does not exist.",
))?;
Ok(Ra(get_backup_info::v3::Response {
Ok(get_backup_info::v3::Response {
algorithm,
count: services()
count: (services()
.key_backups
.count_keys(sender_user, &body.version)?
.try_into()
.expect("count should fit in UInt"),
etag: services().key_backups.get_etag(sender_user, &body.version)?,
version: body.version.clone(),
}))
.count_keys(sender_user, &body.version)? as u32)
.into(),
etag: services()
.key_backups
.get_etag(sender_user, &body.version)?,
version: body.version.to_owned(),
})
}
/// # `DELETE /_matrix/client/r0/room_keys/version/{version}`
///
/// Delete an existing key backup.
///
/// - Deletes both information about the backup, as well as all key data related
/// to the backup
pub(crate) async fn delete_backup_version_route(
body: Ar<delete_backup_version::v3::Request>,
) -> Result<Ra<delete_backup_version::v3::Response>> {
/// - Deletes both information about the backup, as well as all key data related to the backup
pub async fn delete_backup_version_route(
body: Ruma<delete_backup_version::v3::Request>,
) -> Result<delete_backup_version::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
services().key_backups.delete_backup(sender_user, &body.version)?;
services()
.key_backups
.delete_backup(sender_user, &body.version)?;
Ok(Ra(delete_backup_version::v3::Response {}))
Ok(delete_backup_version::v3::Response {})
}
/// # `PUT /_matrix/client/r0/room_keys/keys`
///
/// Add the received backup keys to the database.
///
/// - Only manipulating the most recently created version of the backup is
/// allowed
/// - Only manipulating the most recently created version of the backup is allowed
/// - Adds the keys to the backup
/// - Returns the new number of keys in this backup and the etag
pub(crate) async fn add_backup_keys_route(
body: Ar<add_backup_keys::v3::Request>,
) -> Result<Ra<add_backup_keys::v3::Response>> {
pub async fn add_backup_keys_route(
body: Ruma<add_backup_keys::v3::Request>,
) -> Result<add_backup_keys::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if Some(&body.version)
@ -135,8 +127,7 @@ pub(crate) async fn add_backup_keys_route(
{
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"You may only manipulate the most recently created version of the \
backup.",
"You may only manipulate the most recently created version of the backup.",
));
}
@ -148,31 +139,31 @@ pub(crate) async fn add_backup_keys_route(
room_id,
session_id,
key_data,
)?;
)?
}
}
Ok(Ra(add_backup_keys::v3::Response {
count: services()
Ok(add_backup_keys::v3::Response {
count: (services()
.key_backups
.count_keys(sender_user, &body.version)?
.try_into()
.expect("count should fit in UInt"),
etag: services().key_backups.get_etag(sender_user, &body.version)?,
}))
.count_keys(sender_user, &body.version)? as u32)
.into(),
etag: services()
.key_backups
.get_etag(sender_user, &body.version)?,
})
}
/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}`
///
/// Add the received backup keys to the database.
///
/// - Only manipulating the most recently created version of the backup is
/// allowed
/// - Only manipulating the most recently created version of the backup is allowed
/// - Adds the keys to the backup
/// - Returns the new number of keys in this backup and the etag
pub(crate) async fn add_backup_keys_for_room_route(
body: Ar<add_backup_keys_for_room::v3::Request>,
) -> Result<Ra<add_backup_keys_for_room::v3::Response>> {
pub async fn add_backup_keys_for_room_route(
body: Ruma<add_backup_keys_for_room::v3::Request>,
) -> Result<add_backup_keys_for_room::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if Some(&body.version)
@ -183,8 +174,7 @@ pub(crate) async fn add_backup_keys_for_room_route(
{
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"You may only manipulate the most recently created version of the \
backup.",
"You may only manipulate the most recently created version of the backup.",
));
}
@ -195,30 +185,30 @@ pub(crate) async fn add_backup_keys_for_room_route(
&body.room_id,
session_id,
key_data,
)?;
)?
}
Ok(Ra(add_backup_keys_for_room::v3::Response {
count: services()
Ok(add_backup_keys_for_room::v3::Response {
count: (services()
.key_backups
.count_keys(sender_user, &body.version)?
.try_into()
.expect("count should fit in UInt"),
etag: services().key_backups.get_etag(sender_user, &body.version)?,
}))
.count_keys(sender_user, &body.version)? as u32)
.into(),
etag: services()
.key_backups
.get_etag(sender_user, &body.version)?,
})
}
/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
///
/// Add the received backup key to the database.
///
/// - Only manipulating the most recently created version of the backup is
/// allowed
/// - Only manipulating the most recently created version of the backup is allowed
/// - Adds the keys to the backup
/// - Returns the new number of keys in this backup and the etag
pub(crate) async fn add_backup_keys_for_session_route(
body: Ar<add_backup_keys_for_session::v3::Request>,
) -> Result<Ra<add_backup_keys_for_session::v3::Response>> {
pub async fn add_backup_keys_for_session_route(
body: Ruma<add_backup_keys_for_session::v3::Request>,
) -> Result<add_backup_keys_for_session::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if Some(&body.version)
@ -229,8 +219,7 @@ pub(crate) async fn add_backup_keys_for_session_route(
{
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"You may only manipulate the most recently created version of the \
backup.",
"You may only manipulate the most recently created version of the backup.",
));
}
@ -242,126 +231,116 @@ pub(crate) async fn add_backup_keys_for_session_route(
&body.session_data,
)?;
Ok(Ra(add_backup_keys_for_session::v3::Response {
count: services()
Ok(add_backup_keys_for_session::v3::Response {
count: (services()
.key_backups
.count_keys(sender_user, &body.version)?
.try_into()
.expect("count should fit in UInt"),
etag: services().key_backups.get_etag(sender_user, &body.version)?,
}))
.count_keys(sender_user, &body.version)? as u32)
.into(),
etag: services()
.key_backups
.get_etag(sender_user, &body.version)?,
})
}
/// # `GET /_matrix/client/r0/room_keys/keys`
///
/// Retrieves all keys from the backup.
pub(crate) async fn get_backup_keys_route(
body: Ar<get_backup_keys::v3::Request>,
) -> Result<Ra<get_backup_keys::v3::Response>> {
pub async fn get_backup_keys_route(
body: Ruma<get_backup_keys::v3::Request>,
) -> Result<get_backup_keys::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let rooms = services().key_backups.get_all(sender_user, &body.version)?;
Ok(Ra(get_backup_keys::v3::Response {
rooms,
}))
Ok(get_backup_keys::v3::Response { rooms })
}
/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}`
///
/// Retrieves all keys from the backup for a given room.
pub(crate) async fn get_backup_keys_for_room_route(
body: Ar<get_backup_keys_for_room::v3::Request>,
) -> Result<Ra<get_backup_keys_for_room::v3::Response>> {
pub async fn get_backup_keys_for_room_route(
body: Ruma<get_backup_keys_for_room::v3::Request>,
) -> Result<get_backup_keys_for_room::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sessions = services().key_backups.get_room(
sender_user,
&body.version,
&body.room_id,
)?;
let sessions = services()
.key_backups
.get_room(sender_user, &body.version, &body.room_id)?;
Ok(Ra(get_backup_keys_for_room::v3::Response {
sessions,
}))
Ok(get_backup_keys_for_room::v3::Response { sessions })
}
/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
///
/// Retrieves a key from the backup.
pub(crate) async fn get_backup_keys_for_session_route(
body: Ar<get_backup_keys_for_session::v3::Request>,
) -> Result<Ra<get_backup_keys_for_session::v3::Response>> {
pub async fn get_backup_keys_for_session_route(
body: Ruma<get_backup_keys_for_session::v3::Request>,
) -> Result<get_backup_keys_for_session::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let key_data = services()
.key_backups
.get_session(
sender_user,
&body.version,
&body.room_id,
&body.session_id,
)?
.get_session(sender_user, &body.version, &body.room_id, &body.session_id)?
.ok_or(Error::BadRequest(
ErrorKind::NotFound,
"Backup key not found for this user's session.",
))?;
Ok(Ra(get_backup_keys_for_session::v3::Response {
key_data,
}))
Ok(get_backup_keys_for_session::v3::Response { key_data })
}
/// # `DELETE /_matrix/client/r0/room_keys/keys`
///
/// Delete the keys from the backup.
pub(crate) async fn delete_backup_keys_route(
body: Ar<delete_backup_keys::v3::Request>,
) -> Result<Ra<delete_backup_keys::v3::Response>> {
pub async fn delete_backup_keys_route(
body: Ruma<delete_backup_keys::v3::Request>,
) -> Result<delete_backup_keys::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
services().key_backups.delete_all_keys(sender_user, &body.version)?;
services()
.key_backups
.delete_all_keys(sender_user, &body.version)?;
Ok(Ra(delete_backup_keys::v3::Response {
count: services()
Ok(delete_backup_keys::v3::Response {
count: (services()
.key_backups
.count_keys(sender_user, &body.version)?
.try_into()
.expect("count should fit in UInt"),
etag: services().key_backups.get_etag(sender_user, &body.version)?,
}))
.count_keys(sender_user, &body.version)? as u32)
.into(),
etag: services()
.key_backups
.get_etag(sender_user, &body.version)?,
})
}
/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}`
///
/// Delete the keys from the backup for a given room.
pub(crate) async fn delete_backup_keys_for_room_route(
body: Ar<delete_backup_keys_for_room::v3::Request>,
) -> Result<Ra<delete_backup_keys_for_room::v3::Response>> {
pub async fn delete_backup_keys_for_room_route(
body: Ruma<delete_backup_keys_for_room::v3::Request>,
) -> Result<delete_backup_keys_for_room::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
services().key_backups.delete_room_keys(
sender_user,
&body.version,
&body.room_id,
)?;
services()
.key_backups
.delete_room_keys(sender_user, &body.version, &body.room_id)?;
Ok(Ra(delete_backup_keys_for_room::v3::Response {
count: services()
Ok(delete_backup_keys_for_room::v3::Response {
count: (services()
.key_backups
.count_keys(sender_user, &body.version)?
.try_into()
.expect("count should fit in UInt"),
etag: services().key_backups.get_etag(sender_user, &body.version)?,
}))
.count_keys(sender_user, &body.version)? as u32)
.into(),
etag: services()
.key_backups
.get_etag(sender_user, &body.version)?,
})
}
/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
///
/// Delete a key from the backup.
pub(crate) async fn delete_backup_keys_for_session_route(
body: Ar<delete_backup_keys_for_session::v3::Request>,
) -> Result<Ra<delete_backup_keys_for_session::v3::Response>> {
pub async fn delete_backup_keys_for_session_route(
body: Ruma<delete_backup_keys_for_session::v3::Request>,
) -> Result<delete_backup_keys_for_session::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
services().key_backups.delete_room_key(
@@ -371,12 +350,13 @@ pub(crate) async fn delete_backup_keys_for_session_route(
&body.session_id,
)?;
Ok(Ra(delete_backup_keys_for_session::v3::Response {
count: services()
Ok(delete_backup_keys_for_session::v3::Response {
count: (services()
.key_backups
.count_keys(sender_user, &body.version)?
.try_into()
.expect("count should fit in UInt"),
etag: services().key_backups.get_etag(sender_user, &body.version)?,
}))
.count_keys(sender_user, &body.version)? as u32)
.into(),
etag: services()
.key_backups
.get_etag(sender_user, &body.version)?,
})
}
View file
@@ -1,20 +1,19 @@
use std::collections::BTreeMap;
use crate::{services, Result, Ruma};
use ruma::api::client::discovery::get_capabilities::{
self,
v3::{Capabilities, RoomVersionStability, RoomVersionsCapability},
self, Capabilities, RoomVersionStability, RoomVersionsCapability,
};
use crate::{services, Ar, Ra, Result};
use std::collections::BTreeMap;
/// # `GET /_matrix/client/r0/capabilities`
///
/// Get information on the supported feature set and other relevant capabilities
/// of this server.
pub(crate) async fn get_capabilities_route(
_body: Ar<get_capabilities::v3::Request>,
) -> Result<Ra<get_capabilities::v3::Response>> {
/// Get information on the supported feature set and other relevant capabilities of this server.
pub async fn get_capabilities_route(
_body: Ruma<get_capabilities::v3::Request>,
) -> Result<get_capabilities::v3::Response> {
let mut available = BTreeMap::new();
for room_version in &services().globals.unstable_room_versions {
available.insert(room_version.clone(), RoomVersionStability::Unstable);
}
for room_version in &services().globals.stable_room_versions {
available.insert(room_version.clone(), RoomVersionStability::Stable);
}
@@ -25,7 +24,5 @@ pub(crate) async fn get_capabilities_route(
available,
};
Ok(Ra(get_capabilities::v3::Response {
capabilities,
}))
Ok(get_capabilities::v3::Response { capabilities })
}
View file
@@ -1,80 +1,116 @@
use ruma::api::client::{
config::{
get_global_account_data, get_room_account_data,
set_global_account_data, set_room_account_data,
use crate::{services, Error, Result, Ruma};
use ruma::{
api::client::{
config::{
get_global_account_data, get_room_account_data, set_global_account_data,
set_room_account_data,
},
error::ErrorKind,
},
error::ErrorKind,
events::{AnyGlobalAccountDataEventContent, AnyRoomAccountDataEventContent},
serde::Raw,
};
use crate::{services, Ar, Error, Ra, Result};
use serde::Deserialize;
use serde_json::{json, value::RawValue as RawJsonValue};
/// # `PUT /_matrix/client/r0/user/{userId}/account_data/{type}`
///
/// Sets some account data for the sender user.
pub(crate) async fn set_global_account_data_route(
body: Ar<set_global_account_data::v3::Request>,
) -> Result<Ra<set_global_account_data::v3::Response>> {
pub async fn set_global_account_data_route(
body: Ruma<set_global_account_data::v3::Request>,
) -> Result<set_global_account_data::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
services().account_data.update_global_any(
let data: serde_json::Value = serde_json::from_str(body.data.json().get())
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?;
let event_type = body.event_type.to_string();
services().account_data.update(
None,
sender_user,
&body.event_type,
&body.data,
event_type.clone().into(),
&json!({
"type": event_type,
"content": data,
}),
)?;
Ok(Ra(set_global_account_data::v3::Response {}))
Ok(set_global_account_data::v3::Response {})
}
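For context, the pre-fork side of this hunk persists account data as a full event envelope rather than bare content. A rough self-contained sketch of that wrapping step, using only serde_json (the function name is illustrative, not the service API):

    use serde_json::{json, Value};

    // Sketch: wrap raw account-data content into the event envelope that
    // is stored, mirroring the `json!({ "type": ..., "content": ... })`
    // call in the diff above.
    fn wrap_account_data(event_type: &str, content: Value) -> Value {
        json!({
            "type": event_type,
            "content": content,
        })
    }

    fn main() {
        let event = wrap_account_data("m.push_rules", json!({ "global": {} }));
        assert_eq!(event["type"], "m.push_rules");
    }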
/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}`
///
/// Sets some room account data for the sender user.
pub(crate) async fn set_room_account_data_route(
body: Ar<set_room_account_data::v3::Request>,
) -> Result<Ra<set_room_account_data::v3::Response>> {
pub async fn set_room_account_data_route(
body: Ruma<set_room_account_data::v3::Request>,
) -> Result<set_room_account_data::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
services().account_data.update_room_any(
&body.room_id,
let data: serde_json::Value = serde_json::from_str(body.data.json().get())
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?;
let event_type = body.event_type.to_string();
services().account_data.update(
Some(&body.room_id),
sender_user,
&body.event_type,
&body.data,
event_type.clone().into(),
&json!({
"type": event_type,
"content": data,
}),
)?;
Ok(Ra(set_room_account_data::v3::Response {}))
Ok(set_room_account_data::v3::Response {})
}
/// # `GET /_matrix/client/r0/user/{userId}/account_data/{type}`
///
/// Gets some account data for the sender user.
pub(crate) async fn get_global_account_data_route(
body: Ar<get_global_account_data::v3::Request>,
) -> Result<Ra<get_global_account_data::v3::Response>> {
pub async fn get_global_account_data_route(
body: Ruma<get_global_account_data::v3::Request>,
) -> Result<get_global_account_data::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let account_data = services()
let event: Box<RawJsonValue> = services()
.account_data
.get_global_any(sender_user, &body.event_type)?
.get(None, sender_user, body.event_type.to_string().into())?
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?;
Ok(Ra(get_global_account_data::v3::Response {
account_data,
}))
let account_data = serde_json::from_str::<ExtractGlobalEventContent>(event.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))?
.content;
Ok(get_global_account_data::v3::Response { account_data })
}
/// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}`
///
/// Gets some room account data for the sender user.
pub(crate) async fn get_room_account_data_route(
body: Ar<get_room_account_data::v3::Request>,
) -> Result<Ra<get_room_account_data::v3::Response>> {
pub async fn get_room_account_data_route(
body: Ruma<get_room_account_data::v3::Request>,
) -> Result<get_room_account_data::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let account_data = services()
let event: Box<RawJsonValue> = services()
.account_data
.get_room_any(&body.room_id, sender_user, &body.event_type)?
.get(Some(&body.room_id), sender_user, body.event_type.clone())?
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?;
Ok(Ra(get_room_account_data::v3::Response {
account_data,
}))
let account_data = serde_json::from_str::<ExtractRoomEventContent>(event.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))?
.content;
Ok(get_room_account_data::v3::Response { account_data })
}
#[derive(Deserialize)]
struct ExtractRoomEventContent {
content: Raw<AnyRoomAccountDataEventContent>,
}
#[derive(Deserialize)]
struct ExtractGlobalEventContent {
content: Raw<AnyGlobalAccountDataEventContent>,
}
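The `Extract*EventContent` structs above are the matching read path: serde only needs the `content` field, so everything else in the stored envelope is ignored. A reduced sketch of the pattern, with a plain `Value` standing in for Ruma's `Raw` types:

    use serde::Deserialize;
    use serde_json::Value;

    // Sketch: pull just the `content` field back out of a stored event
    // envelope, ignoring `type` and any other keys, as the extractor
    // structs in the diff do.
    #[derive(Deserialize)]
    struct ExtractContent {
        content: Value,
    }

    fn main() {
        let stored = r#"{ "type": "m.tag", "content": { "tags": {} } }"#;
        let extracted: ExtractContent =
            serde_json::from_str(stored).expect("valid JSON");
        assert!(extracted.content.get("tags").is_some());
    }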
View file
@@ -1,58 +1,60 @@
use std::collections::HashSet;
use crate::{services, Error, Result, Ruma};
use ruma::{
api::client::{
context::get_context, error::ErrorKind, filter::LazyLoadOptions,
},
api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions},
events::StateEventType,
uint,
};
use std::collections::HashSet;
use tracing::error;
use crate::{services, Ar, Error, Ra, Result};
/// # `GET /_matrix/client/r0/rooms/{roomId}/context`
///
/// Allows loading room history around an event.
///
/// - Only works if the user is joined (TODO: always allow, but only show events
/// if the user was joined, depending on `history_visibility`)
#[allow(clippy::too_many_lines)]
pub(crate) async fn get_context_route(
body: Ar<get_context::v3::Request>,
) -> Result<Ra<get_context::v3::Response>> {
/// - Only works if the user is joined (TODO: always allow, but only show events if the user was
/// joined, depending on history_visibility)
pub async fn get_context_route(
body: Ruma<get_context::v3::Request>,
) -> Result<get_context::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device =
body.sender_device.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
let (lazy_load_enabled, lazy_load_send_redundant) =
match &body.filter.lazy_load_options {
LazyLoadOptions::Enabled {
include_redundant_members,
} => (true, *include_redundant_members),
LazyLoadOptions::Disabled => (false, false),
};
let (lazy_load_enabled, lazy_load_send_redundant) = match &body.filter.lazy_load_options {
LazyLoadOptions::Enabled {
include_redundant_members,
} => (true, *include_redundant_members),
_ => (false, false),
};
let mut lazy_loaded = HashSet::new();
let base_token =
services().rooms.timeline.get_pdu_count(&body.event_id)?.ok_or(
Error::BadRequest(ErrorKind::NotFound, "Base event id not found."),
)?;
let base_token = services()
.rooms
.timeline
.get_pdu_count(&body.event_id)?
.ok_or(Error::BadRequest(
ErrorKind::NotFound,
"Base event id not found.",
))?;
let base_event = services().rooms.timeline.get_pdu(&body.event_id)?.ok_or(
Error::BadRequest(ErrorKind::NotFound, "Base event not found."),
)?;
let base_event =
services()
.rooms
.timeline
.get_pdu(&body.event_id)?
.ok_or(Error::BadRequest(
ErrorKind::NotFound,
"Base event not found.",
))?;
let room_id = base_event.room_id.clone();
if !services().rooms.state_accessor.user_can_see_event(
sender_user,
&room_id,
&body.event_id,
)? {
if !services()
.rooms
.state_accessor
.user_can_see_event(sender_user, &room_id, &body.event_id)?
{
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"You don't have permission to view this event.",
));
}
@@ -68,8 +70,7 @@ pub(crate) async fn get_context_route(
}
// Use limit with maximum 100
let half_limit = usize::try_from(body.limit.min(uint!(100)) / uint!(2))
.expect("0-50 should fit in usize");
let limit = u64::from(body.limit).min(100) as usize;
let base_event = base_event.to_room_event();
@@ -77,8 +78,8 @@ pub(crate) async fn get_context_route(
.rooms
.timeline
.pdus_until(sender_user, &room_id, base_token)?
.take(half_limit)
.filter_map(Result::ok)
.take(limit / 2)
.filter_map(|r| r.ok()) // Remove buggy events
.filter(|(_, pdu)| {
services()
.rooms
@@ -102,17 +103,20 @@ pub(crate) async fn get_context_route(
let start_token = events_before
.last()
.map_or_else(|| base_token.stringify(), |(count, _)| count.stringify());
.map(|(count, _)| count.stringify())
.unwrap_or_else(|| base_token.stringify());
let events_before: Vec<_> =
events_before.into_iter().map(|(_, pdu)| pdu.to_room_event()).collect();
let events_before: Vec<_> = events_before
.into_iter()
.map(|(_, pdu)| pdu.to_room_event())
.collect();
let events_after: Vec<_> = services()
.rooms
.timeline
.pdus_after(sender_user, &room_id, base_token)?
.take(half_limit)
.filter_map(Result::ok)
.take(limit / 2)
.filter_map(|r| r.ok()) // Remove buggy events
.filter(|(_, pdu)| {
services()
.rooms
@@ -134,46 +138,59 @@ pub(crate) async fn get_context_route(
}
}
let shortstatehash =
match services().rooms.state_accessor.pdu_shortstatehash(
events_after.last().map_or(&*body.event_id, |(_, e)| &*e.event_id),
)? {
Some(s) => s,
None => services()
.rooms
.state
.get_room_shortstatehash(&room_id)?
.expect("All rooms have state"),
};
let shortstatehash = match services().rooms.state_accessor.pdu_shortstatehash(
events_after
.last()
.map_or(&*body.event_id, |(_, e)| &*e.event_id),
)? {
Some(s) => s,
None => services()
.rooms
.state
.get_room_shortstatehash(&room_id)?
.expect("All rooms have state"),
};
let state_ids =
services().rooms.state_accessor.state_full_ids(shortstatehash).await?;
let state_ids = services()
.rooms
.state_accessor
.state_full_ids(shortstatehash)
.await?;
let end_token = events_after
.last()
.map_or_else(|| base_token.stringify(), |(count, _)| count.stringify());
.map(|(count, _)| count.stringify())
.unwrap_or_else(|| base_token.stringify());
let events_after: Vec<_> =
events_after.into_iter().map(|(_, pdu)| pdu.to_room_event()).collect();
let events_after: Vec<_> = events_after
.into_iter()
.map(|(_, pdu)| pdu.to_room_event())
.collect();
let mut state = Vec::new();
for (shortstatekey, event_id) in state_ids {
let (event_type, state_key) =
services().rooms.short.get_statekey_from_short(shortstatekey)?;
for (shortstatekey, id) in state_ids {
let (event_type, state_key) = services()
.rooms
.short
.get_statekey_from_short(shortstatekey)?;
if event_type != StateEventType::RoomMember {
let Some(pdu) = services().rooms.timeline.get_pdu(&event_id)?
else {
error!(%event_id, "Event in state not found");
continue;
let pdu = match services().rooms.timeline.get_pdu(&id)? {
Some(pdu) => pdu,
None => {
error!("Pdu in state not found: {}", id);
continue;
}
};
state.push(pdu.to_state_event());
} else if !lazy_load_enabled || lazy_loaded.contains(&state_key) {
let Some(pdu) = services().rooms.timeline.get_pdu(&event_id)?
else {
error!(%event_id, "Event in state not found");
continue;
let pdu = match services().rooms.timeline.get_pdu(&id)? {
Some(pdu) => pdu,
None => {
error!("Pdu in state not found: {}", id);
continue;
}
};
state.push(pdu.to_state_event());
}
@@ -188,5 +205,5 @@ pub(crate) async fn get_context_route(
state,
};
Ok(Ra(resp))
Ok(resp)
}
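One behavioural detail worth noting in these hunks: the requested context limit is clamped to 100 and then split evenly between the events before and after the base event, on both sides of the diff. A standalone sketch of that arithmetic (the function name is illustrative):

    // Sketch: clamp the requested context limit to 100 and split it
    // between the "before" and "after" halves, as get_context_route does.
    fn half_limit(requested: u64) -> usize {
        let clamped = requested.min(100);
        usize::try_from(clamped / 2).expect("0-50 fits in usize")
    }

    fn main() {
        assert_eq!(half_limit(10), 5);
        assert_eq!(half_limit(1000), 50); // clamped to 100, then halved
    }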
View file
@@ -1,40 +1,35 @@
use crate::{services, utils, Error, Result, Ruma};
use ruma::api::client::{
device::{
self, delete_device, delete_devices, get_device, get_devices,
update_device,
},
device::{self, delete_device, delete_devices, get_device, get_devices, update_device},
error::ErrorKind,
uiaa::{AuthFlow, AuthType, UiaaInfo},
};
use super::SESSION_ID_LENGTH;
use crate::{services, utils, Ar, Error, Ra, Result};
/// # `GET /_matrix/client/r0/devices`
///
/// Get metadata on all devices of the sender user.
pub(crate) async fn get_devices_route(
body: Ar<get_devices::v3::Request>,
) -> Result<Ra<get_devices::v3::Response>> {
pub async fn get_devices_route(
body: Ruma<get_devices::v3::Request>,
) -> Result<get_devices::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let devices: Vec<device::Device> = services()
.users
.all_devices_metadata(sender_user)
.filter_map(Result::ok)
.filter_map(|r| r.ok()) // Filter out buggy devices
.collect();
Ok(Ra(get_devices::v3::Response {
devices,
}))
Ok(get_devices::v3::Response { devices })
}
/// # `GET /_matrix/client/r0/devices/{deviceId}`
///
/// Get metadata on a single device of the sender user.
pub(crate) async fn get_device_route(
body: Ar<get_device::v3::Request>,
) -> Result<Ra<get_device::v3::Response>> {
pub async fn get_device_route(
body: Ruma<get_device::v3::Request>,
) -> Result<get_device::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let device = services()
@@ -42,17 +37,15 @@ pub(crate) async fn get_device_route(
.get_device_metadata(sender_user, &body.body.device_id)?
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?;
Ok(Ra(get_device::v3::Response {
device,
}))
Ok(get_device::v3::Response { device })
}
/// # `PUT /_matrix/client/r0/devices/{deviceId}`
///
/// Updates the metadata on a given device of the sender user.
pub(crate) async fn update_device_route(
body: Ar<update_device::v3::Request>,
) -> Result<Ra<update_device::v3::Response>> {
pub async fn update_device_route(
body: Ruma<update_device::v3::Request>,
) -> Result<update_device::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let mut device = services()
@@ -60,15 +53,13 @@ pub(crate) async fn update_device_route(
.get_device_metadata(sender_user, &body.device_id)?
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?;
device.display_name.clone_from(&body.display_name);
device.display_name = body.display_name.clone();
services().users.update_device_metadata(
sender_user,
&body.device_id,
&device,
)?;
services()
.users
.update_device_metadata(sender_user, &body.device_id, &device)?;
Ok(Ra(update_device::v3::Response {}))
Ok(update_device::v3::Response {})
}
/// # `DELETE /_matrix/client/r0/devices/{deviceId}`
@@ -77,16 +68,14 @@ pub(crate) async fn update_device_route(
///
/// - Requires UIAA to verify user password
/// - Invalidates access token
/// - Deletes device metadata (device id, device display name, last seen ip,
/// last seen ts)
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
/// - Forgets to-device events
/// - Triggers device list updates
pub(crate) async fn delete_device_route(
body: Ar<delete_device::v3::Request>,
) -> Result<Ra<delete_device::v3::Response>> {
pub async fn delete_device_route(
body: Ruma<delete_device::v3::Request>,
) -> Result<delete_device::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device =
body.sender_device.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
// UIAA
let mut uiaainfo = UiaaInfo {
@@ -94,33 +83,35 @@ pub(crate) async fn delete_device_route(
stages: vec![AuthType::Password],
}],
completed: Vec::new(),
params: Some(Box::default()),
params: Default::default(),
session: None,
auth_error: None,
};
if let Some(auth) = &body.auth {
let (worked, uiaainfo) = services().uiaa.try_auth(
sender_user,
sender_device,
auth,
&uiaainfo,
)?;
let (worked, uiaainfo) =
services()
.uiaa
.try_auth(sender_user, sender_device, auth, &uiaainfo)?;
if !worked {
return Err(Error::Uiaa(Box::new(uiaainfo)));
return Err(Error::Uiaa(uiaainfo));
}
// Success!
} else if let Some(json) = body.json_body {
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
services().uiaa.create(sender_user, sender_device, &uiaainfo, &json)?;
return Err(Error::Uiaa(Box::new(uiaainfo)));
services()
.uiaa
.create(sender_user, sender_device, &uiaainfo, &json)?;
return Err(Error::Uiaa(uiaainfo));
} else {
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
}
services().users.remove_device(sender_user, &body.device_id)?;
services()
.users
.remove_device(sender_user, &body.device_id)?;
Ok(Ra(delete_device::v3::Response {}))
Ok(delete_device::v3::Response {})
}
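The UIAA block in `delete_device_route` (repeated almost verbatim in `delete_devices_route` and `upload_signing_keys_route` below) follows one fixed shape: try the supplied auth, otherwise open a session and bounce the flow back to the client. A condensed sketch of that control flow, with the Ruma types and the uiaa service replaced by stand-ins:

    // Sketch of the shared UIAA control flow. `UiaaInfo` and the auth
    // check are stand-ins for Ruma's types and the uiaa service.
    #[allow(dead_code)]
    struct UiaaInfo {
        session: Option<String>,
    }

    enum Outcome {
        /// Auth completed; the route may proceed.
        Authed,
        /// Client must (re)do UIAA with this info.
        Retry(UiaaInfo),
        /// Request body was not JSON at all.
        BadRequest,
    }

    fn uiaa_flow(auth: Option<&str>, json_body: Option<&str>) -> Outcome {
        let mut uiaainfo = UiaaInfo { session: None };
        if auth.is_some() {
            // try_auth(...) would run the password stage here (assumed
            // to succeed in this sketch).
            let worked = true;
            if worked { Outcome::Authed } else { Outcome::Retry(uiaainfo) }
        } else if json_body.is_some() {
            // No auth yet: open a session and hand the flows back.
            uiaainfo.session = Some("random-session-id".to_owned());
            Outcome::Retry(uiaainfo)
        } else {
            Outcome::BadRequest
        }
    }

    fn main() {
        assert!(matches!(uiaa_flow(Some("password"), None), Outcome::Authed));
        assert!(matches!(uiaa_flow(None, Some("{}")), Outcome::Retry(_)));
    }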
/// # `PUT /_matrix/client/r0/devices/{deviceId}`
@@ -131,16 +122,14 @@ pub(crate) async fn delete_device_route(
///
/// For each device:
/// - Invalidates access token
/// - Deletes device metadata (device id, device display name, last seen ip,
/// last seen ts)
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
/// - Forgets to-device events
/// - Triggers device list updates
pub(crate) async fn delete_devices_route(
body: Ar<delete_devices::v3::Request>,
) -> Result<Ra<delete_devices::v3::Response>> {
pub async fn delete_devices_route(
body: Ruma<delete_devices::v3::Request>,
) -> Result<delete_devices::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device =
body.sender_device.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
// UIAA
let mut uiaainfo = UiaaInfo {
@@ -148,33 +137,33 @@ pub(crate) async fn delete_devices_route(
stages: vec![AuthType::Password],
}],
completed: Vec::new(),
params: Some(Box::default()),
params: Default::default(),
session: None,
auth_error: None,
};
if let Some(auth) = &body.auth {
let (worked, uiaainfo) = services().uiaa.try_auth(
sender_user,
sender_device,
auth,
&uiaainfo,
)?;
let (worked, uiaainfo) =
services()
.uiaa
.try_auth(sender_user, sender_device, auth, &uiaainfo)?;
if !worked {
return Err(Error::Uiaa(Box::new(uiaainfo)));
return Err(Error::Uiaa(uiaainfo));
}
// Success!
} else if let Some(json) = body.json_body {
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
services().uiaa.create(sender_user, sender_device, &uiaainfo, &json)?;
return Err(Error::Uiaa(Box::new(uiaainfo)));
services()
.uiaa
.create(sender_user, sender_device, &uiaainfo, &json)?;
return Err(Error::Uiaa(uiaainfo));
} else {
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
}
for device_id in &body.devices {
services().users.remove_device(sender_user, device_id)?;
services().users.remove_device(sender_user, device_id)?
}
Ok(Ra(delete_devices::v3::Response {}))
Ok(delete_devices::v3::Response {})
}
View file
@@ -1,9 +1,10 @@
use crate::{services, Error, Result, Ruma};
use ruma::{
api::{
client::{
directory::{
get_public_rooms, get_public_rooms_filtered,
get_room_visibility, set_room_visibility,
get_public_rooms, get_public_rooms_filtered, get_room_visibility,
set_room_visibility,
},
error::ErrorKind,
room,
@@ -15,31 +16,26 @@ use ruma::{
room::{
avatar::RoomAvatarEventContent,
canonical_alias::RoomCanonicalAliasEventContent,
create::RoomCreateEventContent,
guest_access::{GuestAccess, RoomGuestAccessEventContent},
history_visibility::{
HistoryVisibility, RoomHistoryVisibilityEventContent,
},
history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
join_rules::{JoinRule, RoomJoinRulesEventContent},
topic::RoomTopicEventContent,
},
StateEventType,
},
uint, ServerName, UInt,
ServerName, UInt,
};
use tracing::{error, info, warn};
use crate::{
service::rooms::state::ExtractType, services, Ar, Error, Ra, Result,
};
/// # `POST /_matrix/client/r0/publicRooms`
///
/// Lists the public rooms on this server.
///
/// - Rooms are ordered by the number of joined members
pub(crate) async fn get_public_rooms_filtered_route(
body: Ar<get_public_rooms_filtered::v3::Request>,
) -> Result<Ra<get_public_rooms_filtered::v3::Response>> {
pub async fn get_public_rooms_filtered_route(
body: Ruma<get_public_rooms_filtered::v3::Request>,
) -> Result<get_public_rooms_filtered::v3::Response> {
get_public_rooms_filtered_helper(
body.server.as_deref(),
body.limit,
@@ -48,7 +44,6 @@ pub(crate) async fn get_public_rooms_filtered_route(
&body.room_network,
)
.await
.map(Ra)
}
/// # `GET /_matrix/client/r0/publicRooms`
@@ -56,9 +51,9 @@ pub(crate) async fn get_public_rooms_filtered_route(
/// Lists the public rooms on this server.
///
/// - Rooms are ordered by the number of joined members
pub(crate) async fn get_public_rooms_route(
body: Ar<get_public_rooms::v3::Request>,
) -> Result<Ra<get_public_rooms::v3::Response>> {
pub async fn get_public_rooms_route(
body: Ruma<get_public_rooms::v3::Request>,
) -> Result<get_public_rooms::v3::Response> {
let response = get_public_rooms_filtered_helper(
body.server.as_deref(),
body.limit,
@@ -68,12 +63,12 @@ pub(crate) async fn get_public_rooms_route(
)
.await?;
Ok(Ra(get_public_rooms::v3::Response {
Ok(get_public_rooms::v3::Response {
chunk: response.chunk,
prev_batch: response.prev_batch,
next_batch: response.next_batch,
total_room_count_estimate: response.total_room_count_estimate,
}))
})
}
/// # `PUT /_matrix/client/r0/directory/list/room/{roomId}`
@@ -81,9 +76,9 @@ pub(crate) async fn get_public_rooms_route(
/// Sets the visibility of a given room in the room directory.
///
/// - TODO: Access control checks
pub(crate) async fn set_room_visibility_route(
body: Ar<set_room_visibility::v3::Request>,
) -> Result<Ra<set_room_visibility::v3::Response>> {
pub async fn set_room_visibility_route(
body: Ruma<set_room_visibility::v3::Request>,
) -> Result<set_room_visibility::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if !services().rooms.metadata.exists(&body.room_id)? {
@@ -94,15 +89,9 @@ pub(crate) async fn set_room_visibility_route(
match &body.visibility {
room::Visibility::Public => {
services().rooms.directory.set_public(&body.room_id)?;
info!(
user_id = %sender_user,
room_id = %body.room_id,
"User made room public",
);
}
room::Visibility::Private => {
services().rooms.directory.set_not_public(&body.room_id)?;
info!("{} made {} public", sender_user, body.room_id);
}
room::Visibility::Private => services().rooms.directory.set_not_public(&body.room_id)?,
_ => {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
@@ -111,34 +100,29 @@ pub(crate) async fn set_room_visibility_route(
}
}
Ok(Ra(set_room_visibility::v3::Response {}))
Ok(set_room_visibility::v3::Response {})
}
/// # `GET /_matrix/client/r0/directory/list/room/{roomId}`
///
/// Gets the visibility of a given room in the room directory.
pub(crate) async fn get_room_visibility_route(
body: Ar<get_room_visibility::v3::Request>,
) -> Result<Ra<get_room_visibility::v3::Response>> {
pub async fn get_room_visibility_route(
body: Ruma<get_room_visibility::v3::Request>,
) -> Result<get_room_visibility::v3::Response> {
if !services().rooms.metadata.exists(&body.room_id)? {
// Return 404 if the room doesn't exist
return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found"));
}
Ok(Ra(get_room_visibility::v3::Response {
visibility: if services()
.rooms
.directory
.is_public_room(&body.room_id)?
{
Ok(get_room_visibility::v3::Response {
visibility: if services().rooms.directory.is_public_room(&body.room_id)? {
room::Visibility::Public
} else {
room::Visibility::Private
},
}))
})
}
#[allow(clippy::too_many_lines)]
pub(crate) async fn get_public_rooms_filtered_helper(
server: Option<&ServerName>,
limit: Option<UInt>,
@@ -146,8 +130,8 @@ pub(crate) async fn get_public_rooms_filtered_helper(
filter: &Filter,
_network: &RoomNetwork,
) -> Result<get_public_rooms_filtered::v3::Response> {
if let Some(other_server) = server
.filter(|server| *server != services().globals.server_name().as_str())
if let Some(other_server) =
server.filter(|server| *server != services().globals.server_name().as_str())
{
let response = services()
.sending
@@ -173,8 +157,8 @@ pub(crate) async fn get_public_rooms_filtered_helper(
});
}
let limit = limit.unwrap_or(uint!(10));
let mut num_since = UInt::MIN;
let limit = limit.map_or(10, u64::from);
let mut num_since = 0_u64;
if let Some(s) = &since {
let mut characters = s.chars();
@@ -189,9 +173,10 @@ pub(crate) async fn get_public_rooms_filtered_helper(
}
};
num_since = characters.collect::<String>().parse().map_err(|_| {
Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token.")
})?;
num_since = characters
.collect::<String>()
.parse()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token."))?;
if backwards {
num_since = num_since.saturating_sub(limit);
@@ -202,12 +187,129 @@ pub(crate) async fn get_public_rooms_filtered_helper(
.rooms
.directory
.public_rooms()
.filter_map(Result::ok)
.map(room_id_to_chunk)
.filter_map(Result::ok)
.map(|room_id| {
let room_id = room_id?;
let chunk = PublicRoomsChunk {
canonical_alias: services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")?
.map_or(Ok(None), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomCanonicalAliasEventContent| c.alias)
.map_err(|_| {
Error::bad_database("Invalid canonical alias event in database.")
})
})?,
name: services().rooms.state_accessor.get_name(&room_id)?,
num_joined_members: services()
.rooms
.state_cache
.room_joined_count(&room_id)?
.unwrap_or_else(|| {
warn!("Room {} has no member count", room_id);
0
})
.try_into()
.expect("user count should not be that big"),
topic: services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomTopic, "")?
.map_or(Ok(None), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomTopicEventContent| Some(c.topic))
.map_err(|_| {
error!("Invalid room topic event in database for room {}", room_id);
Error::bad_database("Invalid room topic event in database.")
})
})?,
world_readable: services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")?
.map_or(Ok(false), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomHistoryVisibilityEventContent| {
c.history_visibility == HistoryVisibility::WorldReadable
})
.map_err(|_| {
Error::bad_database(
"Invalid room history visibility event in database.",
)
})
})?,
guest_can_join: services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")?
.map_or(Ok(false), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomGuestAccessEventContent| {
c.guest_access == GuestAccess::CanJoin
})
.map_err(|_| {
Error::bad_database("Invalid room guest access event in database.")
})
})?,
avatar_url: services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomAvatar, "")?
.map(|s| {
serde_json::from_str(s.content.get())
.map(|c: RoomAvatarEventContent| c.url)
.map_err(|_| {
Error::bad_database("Invalid room avatar event in database.")
})
})
.transpose()?
// url is now an Option<String> so we must flatten
.flatten(),
join_rule: services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomJoinRules, "")?
.map(|s| {
serde_json::from_str(s.content.get())
.map(|c: RoomJoinRulesEventContent| match c.join_rule {
JoinRule::Public => Some(PublicRoomJoinRule::Public),
JoinRule::Knock => Some(PublicRoomJoinRule::Knock),
_ => None,
})
.map_err(|e| {
error!("Invalid room join rule event in database: {}", e);
Error::BadDatabase("Invalid room join rule event in database.")
})
})
.transpose()?
.flatten()
.ok_or_else(|| Error::bad_database("Missing room join rule event for room."))?,
room_type: services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomCreate, "")?
.map(|s| {
serde_json::from_str::<RoomCreateEventContent>(s.content.get()).map_err(
|e| {
error!("Invalid room create event in database: {}", e);
Error::BadDatabase("Invalid room create event in database.")
},
)
})
.transpose()?
.and_then(|e| e.room_type),
room_id,
};
Ok(chunk)
})
.filter_map(|r: Result<_>| r.ok()) // Filter out buggy rooms
.filter(|chunk| {
if let Some(query) =
filter.generic_search_term.as_ref().map(|q| q.to_lowercase())
if let Some(query) = filter
.generic_search_term
.as_ref()
.map(|q| q.to_lowercase())
{
if let Some(name) = &chunk.name {
if name.as_str().to_lowercase().contains(&query) {
@@ -222,8 +324,7 @@ pub(crate) async fn get_public_rooms_filtered_helper(
}
if let Some(canonical_alias) = &chunk.canonical_alias {
if canonical_alias.as_str().to_lowercase().contains(&query)
{
if canonical_alias.as_str().to_lowercase().contains(&query) {
return true;
}
}
@@ -234,31 +335,30 @@ pub(crate) async fn get_public_rooms_filtered_helper(
true
}
})
// We need to collect all, so we can sort by member count
.collect();
all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members));
let total_room_count_estimate =
all_rooms.len().try_into().unwrap_or(UInt::MAX);
let total_room_count_estimate = (all_rooms.len() as u32).into();
let chunk: Vec<_> = all_rooms
.into_iter()
.skip(num_since.try_into().expect("UInt should fit in usize"))
.take(limit.try_into().expect("UInt should fit in usize"))
.skip(num_since as usize)
.take(limit as usize)
.collect();
let prev_batch = if num_since == uint!(0) {
let prev_batch = if num_since == 0 {
None
} else {
Some(format!("p{num_since}"))
};
let next_batch =
if chunk.len() < limit.try_into().expect("UInt should fit in usize") {
None
} else {
Some(format!("n{}", num_since + limit))
};
let next_batch = if chunk.len() < limit as usize {
None
} else {
Some(format!("n{}", num_since + limit))
};
Ok(get_public_rooms_filtered::v3::Response {
chunk,
@@ -267,135 +367,3 @@ pub(crate) async fn get_public_rooms_filtered_helper(
total_room_count_estimate: Some(total_room_count_estimate),
})
}
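The `since` handling earlier in this helper, together with the `prev_batch`/`next_batch` construction just above, defines a tiny token scheme: `n{offset}` continues forwards and `p{offset}` pages backwards (so the offset is first reduced by the page size). A self-contained sketch of the parsing side of that scheme:

    // Sketch of the public-rooms pagination tokens used in the diff:
    // "n{offset}" pages forwards, "p{offset}" pages backwards.
    fn parse_since(since: &str, limit: u64) -> Option<u64> {
        let mut chars = since.chars();
        let backwards = match chars.next()? {
            'p' => true,
            'n' => false,
            _ => return None, // invalid token
        };
        let mut offset: u64 = chars.as_str().parse().ok()?;
        if backwards {
            offset = offset.saturating_sub(limit);
        }
        Some(offset)
    }

    fn main() {
        assert_eq!(parse_since("n30", 10), Some(30));
        assert_eq!(parse_since("p30", 10), Some(20)); // step one page back
        assert_eq!(parse_since("x30", 10), None);
    }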
#[allow(clippy::too_many_lines)]
#[tracing::instrument]
fn room_id_to_chunk(room_id: ruma::OwnedRoomId) -> Result<PublicRoomsChunk> {
let canonical_alias = services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")?
.map_or(Ok(None), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomCanonicalAliasEventContent| c.alias)
.map_err(|_| {
Error::bad_database(
"Invalid canonical alias event in database.",
)
})
})?;
let name = services().rooms.state_accessor.get_name(&room_id)?;
let num_joined_members = services()
.rooms
.state_cache
.room_joined_count(&room_id)?
.unwrap_or_else(|| {
warn!("Room has no member count");
0
})
.try_into()
.expect("user count should not be that big");
let topic = services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomTopic, "")?
.map_or(Ok(None), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomTopicEventContent| Some(c.topic))
.map_err(|_| {
error!("Invalid room topic event in database for room",);
Error::bad_database("Invalid room topic event in database.")
})
})?;
let world_readable = services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")?
.map_or(Ok(false), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomHistoryVisibilityEventContent| {
c.history_visibility == HistoryVisibility::WorldReadable
})
.map_err(|_| {
Error::bad_database(
"Invalid room history visibility event in database.",
)
})
})?;
let guest_can_join = services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")?
.map_or(Ok(false), |s| {
serde_json::from_str(s.content.get())
.map(|c: RoomGuestAccessEventContent| {
c.guest_access == GuestAccess::CanJoin
})
.map_err(|_| {
Error::bad_database(
"Invalid room guest access event in database.",
)
})
})?;
let avatar_url = services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomAvatar, "")?
.map(|s| {
serde_json::from_str(s.content.get())
.map(|c: RoomAvatarEventContent| c.url)
.map_err(|_| {
Error::bad_database(
"Invalid room avatar event in database.",
)
})
})
.transpose()?
.flatten();
let join_rule = services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomJoinRules, "")?
.map(|s| {
serde_json::from_str(s.content.get())
.map(|c: RoomJoinRulesEventContent| match c.join_rule {
JoinRule::Public => Some(PublicRoomJoinRule::Public),
JoinRule::Knock => Some(PublicRoomJoinRule::Knock),
_ => None,
})
.map_err(|error| {
error!(%error, "Invalid room join rule event in database");
Error::BadDatabase(
"Invalid room join rule event in database.",
)
})
})
.transpose()?
.flatten()
.ok_or_else(|| {
Error::bad_database("Missing room join rule event for room.")
})?;
let room_type =
services().rooms.state.get_create_content::<ExtractType>(&room_id)?;
Ok(PublicRoomsChunk {
canonical_alias,
name,
num_joined_members,
room_id,
topic,
world_readable,
guest_can_join,
avatar_url,
join_rule,
room_type,
})
}
View file
@@ -1,39 +1,34 @@
use crate::{services, Error, Result, Ruma};
use ruma::api::client::{
error::ErrorKind,
filter::{create_filter, get_filter},
};
use crate::{services, Ar, Error, Ra, Result};
/// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}`
///
/// Loads a filter that was previously created.
///
/// - A user can only access their own filters
pub(crate) async fn get_filter_route(
body: Ar<get_filter::v3::Request>,
) -> Result<Ra<get_filter::v3::Response>> {
pub async fn get_filter_route(
body: Ruma<get_filter::v3::Request>,
) -> Result<get_filter::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let Some(filter) =
services().users.get_filter(sender_user, &body.filter_id)?
else {
return Err(Error::BadRequest(
ErrorKind::NotFound,
"Filter not found.",
));
let filter = match services().users.get_filter(sender_user, &body.filter_id)? {
Some(filter) => filter,
None => return Err(Error::BadRequest(ErrorKind::NotFound, "Filter not found.")),
};
Ok(Ra(get_filter::v3::Response::new(filter)))
Ok(get_filter::v3::Response::new(filter))
}
/// # `PUT /_matrix/client/r0/user/{userId}/filter`
///
/// Creates a new filter to be used by other endpoints.
pub(crate) async fn create_filter_route(
body: Ar<create_filter::v3::Request>,
) -> Result<Ra<create_filter::v3::Response>> {
pub async fn create_filter_route(
body: Ruma<create_filter::v3::Request>,
) -> Result<create_filter::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
Ok(Ra(create_filter::v3::Response::new(
Ok(create_filter::v3::Response::new(
services().users.create_filter(sender_user, &body.filter)?,
)))
))
}
View file
@@ -1,52 +1,44 @@
use std::{
collections::{hash_map, BTreeMap, HashMap, HashSet},
time::{Duration, Instant},
};
use super::SESSION_ID_LENGTH;
use crate::{services, utils, Error, Result, Ruma};
use futures_util::{stream::FuturesUnordered, StreamExt};
use ruma::{
api::{
client::{
error::ErrorKind,
keys::{
claim_keys, get_key_changes, get_keys, upload_keys,
upload_signatures, upload_signing_keys,
claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures,
upload_signing_keys,
},
uiaa::{AuthFlow, AuthType, UiaaInfo},
},
federation,
},
serde::Raw,
OneTimeKeyAlgorithm, OwnedDeviceId, OwnedServerName, OwnedUserId,
ServerName, UserId,
DeviceKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId,
};
use serde_json::json;
use std::{
collections::{hash_map, BTreeMap, HashMap, HashSet},
time::{Duration, Instant},
};
use tracing::debug;
use super::SESSION_ID_LENGTH;
use crate::{services, utils, Ar, Error, Ra, Result};
/// # `POST /_matrix/client/r0/keys/upload`
///
/// Publish end-to-end encryption keys for the sender device.
///
/// - Adds one time keys
/// - If there are no device keys yet: Adds device keys (TODO: merge with
/// existing keys?)
pub(crate) async fn upload_keys_route(
body: Ar<upload_keys::v3::Request>,
) -> Result<Ra<upload_keys::v3::Response>> {
/// - If there are no device keys yet: Adds device keys (TODO: merge with existing keys?)
pub async fn upload_keys_route(
body: Ruma<upload_keys::v3::Request>,
) -> Result<upload_keys::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device =
body.sender_device.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
for (key_key, key_value) in &body.one_time_keys {
services().users.add_one_time_key(
sender_user,
sender_device,
key_key,
key_value,
)?;
services()
.users
.add_one_time_key(sender_user, sender_device, key_key, key_value)?;
}
if let Some(device_keys) = &body.device_keys {
@@ -57,19 +49,17 @@ pub(crate) async fn upload_keys_route(
.get_device_keys(sender_user, sender_device)?
.is_none()
{
services().users.add_device_keys(
sender_user,
sender_device,
device_keys,
)?;
services()
.users
.add_device_keys(sender_user, sender_device, device_keys)?;
}
}
Ok(Ra(upload_keys::v3::Response {
Ok(upload_keys::v3::Response {
one_time_key_counts: services()
.users
.count_one_time_keys(sender_user, sender_device)?,
}))
})
}
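The guard in this hunk only stores the uploaded device keys when none exist yet for that device (the TODO about merging with existing keys is still open on both sides). A trivial sketch of that insert-if-absent behaviour over a plain map, with strings standing in for the real key types:

    use std::collections::BTreeMap;

    // Sketch: device keys are only written when the device's slot is
    // still empty, mirroring the `get_device_keys(..).is_none()` guard.
    fn add_device_keys_if_absent(
        store: &mut BTreeMap<String, String>,
        device_id: &str,
        keys: &str,
    ) {
        if !store.contains_key(device_id) {
            store.insert(device_id.to_owned(), keys.to_owned());
        }
    }

    fn main() {
        let mut store = BTreeMap::new();
        add_device_keys_if_absent(&mut store, "DEVICE", "first");
        add_device_keys_if_absent(&mut store, "DEVICE", "second");
        assert_eq!(store["DEVICE"], "first"); // existing keys are kept
    }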
/// # `POST /_matrix/client/r0/keys/query`
@@ -78,30 +68,25 @@ pub(crate) async fn upload_keys_route(
///
/// - Always fetches users from other servers over federation
/// - Gets master keys, self-signing keys, user signing keys and device keys.
/// - The master and self-signing keys contain signatures that the user is
/// allowed to see
pub(crate) async fn get_keys_route(
body: Ar<get_keys::v3::Request>,
) -> Result<Ra<get_keys::v3::Response>> {
/// - The master and self-signing keys contain signatures that the user is allowed to see
pub async fn get_keys_route(body: Ruma<get_keys::v3::Request>) -> Result<get_keys::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let response = get_keys_helper(Some(sender_user), &body.device_keys, |u| {
u == sender_user
})
.await?;
let response =
get_keys_helper(Some(sender_user), &body.device_keys, |u| u == sender_user).await?;
Ok(Ra(response))
Ok(response)
}
/// # `POST /_matrix/client/r0/keys/claim`
///
/// Claims one-time keys
pub(crate) async fn claim_keys_route(
body: Ar<claim_keys::v3::Request>,
) -> Result<Ra<claim_keys::v3::Response>> {
pub async fn claim_keys_route(
body: Ruma<claim_keys::v3::Request>,
) -> Result<claim_keys::v3::Response> {
let response = claim_keys_helper(&body.one_time_keys).await?;
Ok(Ra(response))
Ok(response)
}
/// # `POST /_matrix/client/r0/keys/device_signing/upload`
@@ -109,12 +94,11 @@ pub(crate) async fn claim_keys_route(
/// Uploads end-to-end key information for the sender user.
///
/// - Requires UIAA to verify password
pub(crate) async fn upload_signing_keys_route(
body: Ar<upload_signing_keys::v3::Request>,
) -> Result<Ra<upload_signing_keys::v3::Response>> {
pub async fn upload_signing_keys_route(
body: Ruma<upload_signing_keys::v3::Request>,
) -> Result<upload_signing_keys::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device =
body.sender_device.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
// UIAA
let mut uiaainfo = UiaaInfo {
@@ -122,26 +106,26 @@ pub(crate) async fn upload_signing_keys_route(
stages: vec![AuthType::Password],
}],
completed: Vec::new(),
params: Some(Box::default()),
params: Default::default(),
session: None,
auth_error: None,
};
if let Some(auth) = &body.auth {
let (worked, uiaainfo) = services().uiaa.try_auth(
sender_user,
sender_device,
auth,
&uiaainfo,
)?;
let (worked, uiaainfo) =
services()
.uiaa
.try_auth(sender_user, sender_device, auth, &uiaainfo)?;
if !worked {
return Err(Error::Uiaa(Box::new(uiaainfo)));
return Err(Error::Uiaa(uiaainfo));
}
// Success!
} else if let Some(json) = body.json_body {
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
services().uiaa.create(sender_user, sender_device, &uiaainfo, &json)?;
return Err(Error::Uiaa(Box::new(uiaainfo)));
services()
.uiaa
.create(sender_user, sender_device, &uiaainfo, &json)?;
return Err(Error::Uiaa(uiaainfo));
} else {
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
}
@@ -150,29 +134,27 @@ pub(crate) async fn upload_signing_keys_route(
services().users.add_cross_signing_keys(
sender_user,
master_key,
body.self_signing_key.as_ref(),
body.user_signing_key.as_ref(),
// notify so that other users see the new keys
true,
&body.self_signing_key,
&body.user_signing_key,
true, // notify so that other users see the new keys
)?;
}
Ok(Ra(upload_signing_keys::v3::Response {}))
Ok(upload_signing_keys::v3::Response {})
}
/// # `POST /_matrix/client/r0/keys/signatures/upload`
///
/// Uploads end-to-end key signatures from the sender user.
pub(crate) async fn upload_signatures_route(
body: Ar<upload_signatures::v3::Request>,
) -> Result<Ra<upload_signatures::v3::Response>> {
pub async fn upload_signatures_route(
body: Ruma<upload_signatures::v3::Request>,
) -> Result<upload_signatures::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
for (user_id, keys) in &body.signed_keys {
for (key_id, key) in keys {
let key = serde_json::to_value(key).map_err(|_| {
Error::BadRequest(ErrorKind::InvalidParam, "Invalid key JSON")
})?;
let key = serde_json::to_value(key)
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid key JSON"))?;
for signature in key
.get("signatures")
@@ -191,6 +173,7 @@ pub(crate) async fn upload_signatures_route(
"Invalid signature.",
))?
.clone()
.into_iter()
{
// Signature validation?
let signature = (
@@ -204,31 +187,26 @@ pub(crate) async fn upload_signatures_route(
))?
.to_owned(),
);
services().users.sign_key(
user_id,
key_id,
signature,
sender_user,
)?;
services()
.users
.sign_key(user_id, key_id, signature, sender_user)?;
}
}
}
Ok(Ra(upload_signatures::v3::Response {
// TODO: integrate
failures: BTreeMap::new(),
}))
Ok(upload_signatures::v3::Response {
failures: BTreeMap::new(), // TODO: integrate
})
}
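The loop above digs the sender's signature map out of the uploaded key JSON: `key.signatures[sender_user]` yields `key_id -> signature` pairs, each of which is then stored. A rough standalone sketch of that extraction over plain serde_json values, with error handling reduced to `Option`:

    use serde_json::{json, Value};

    // Sketch: extract (key_id, signature) pairs for one signing user
    // from an uploaded key object, as upload_signatures_route does.
    fn signatures_for<'a>(
        key: &'a Value,
        user_id: &str,
    ) -> Option<Vec<(&'a str, &'a str)>> {
        let sigs = key.get("signatures")?.get(user_id)?.as_object()?;
        sigs.iter()
            .map(|(key_id, sig)| Some((key_id.as_str(), sig.as_str()?)))
            .collect()
    }

    fn main() {
        let key = json!({
            "signatures": {
                "@alice:example.org": { "ed25519:ABC": "sigbase64" }
            }
        });
        let sigs = signatures_for(&key, "@alice:example.org").unwrap();
        assert_eq!(sigs, vec![("ed25519:ABC", "sigbase64")]);
    }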
/// # `POST /_matrix/client/r0/keys/changes`
///
/// Gets a list of users who have updated their device identity keys since the
/// previous sync token.
/// Gets a list of users who have updated their device identity keys since the previous sync token.
///
/// - TODO: left users
pub(crate) async fn get_key_changes_route(
body: Ar<get_key_changes::v3::Request>,
) -> Result<Ra<get_key_changes::v3::Response>> {
pub async fn get_key_changes_route(
body: Ruma<get_key_changes::v3::Request>,
) -> Result<get_key_changes::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let mut device_list_updates = HashSet::new();
@@ -238,24 +216,23 @@ pub(crate) async fn get_key_changes_route(
.users
.keys_changed(
sender_user.as_str(),
body.from.parse().map_err(|_| {
Error::BadRequest(
ErrorKind::InvalidParam,
"Invalid `from`.",
)
})?,
Some(body.to.parse().map_err(|_| {
Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`.")
})?),
body.from
.parse()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`."))?,
Some(
body.to
.parse()
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`."))?,
),
)
.filter_map(Result::ok),
.filter_map(|r| r.ok()),
);
for room_id in services()
.rooms
.state_cache
.rooms_joined(sender_user)
.filter_map(Result::ok)
.filter_map(|r| r.ok())
{
device_list_updates.extend(
services()
@@ -263,29 +240,21 @@ pub(crate) async fn get_key_changes_route(
.keys_changed(
room_id.as_ref(),
body.from.parse().map_err(|_| {
Error::BadRequest(
ErrorKind::InvalidParam,
"Invalid `from`.",
)
Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`.")
})?,
Some(body.to.parse().map_err(|_| {
Error::BadRequest(
ErrorKind::InvalidParam,
"Invalid `to`.",
)
Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`.")
})?),
)
.filter_map(Result::ok),
.filter_map(|r| r.ok()),
);
}
Ok(Ra(get_key_changes::v3::Response {
Ok(get_key_changes::v3::Response {
changed: device_list_updates.into_iter().collect(),
// TODO
left: Vec::new(),
}))
left: Vec::new(), // TODO
})
}
#[allow(clippy::too_many_lines)]
pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
sender_user: Option<&UserId>,
device_keys_input: &BTreeMap<OwnedUserId, Vec<OwnedDeviceId>>,
@@ -313,24 +282,16 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
let mut container = BTreeMap::new();
for device_id in services().users.all_device_ids(user_id) {
let device_id = device_id?;
if let Some(mut keys) =
services().users.get_device_keys(user_id, &device_id)?
{
if let Some(mut keys) = services().users.get_device_keys(user_id, &device_id)? {
let metadata = services()
.users
.get_device_metadata(user_id, &device_id)?
.ok_or_else(|| {
Error::bad_database(
"all_device_keys contained nonexistent device.",
)
Error::bad_database("all_device_keys contained nonexistent device.")
})?;
add_unsigned_device_display_name(&mut keys, metadata)
.map_err(|_| {
Error::bad_database(
"invalid device keys in database",
)
})?;
.map_err(|_| Error::bad_database("invalid device keys in database"))?;
container.insert(device_id, keys);
}
}
@@ -338,9 +299,7 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
} else {
for device_id in device_ids {
let mut container = BTreeMap::new();
if let Some(mut keys) =
services().users.get_device_keys(user_id, device_id)?
{
if let Some(mut keys) = services().users.get_device_keys(user_id, device_id)? {
let metadata = services()
.users
.get_device_metadata(user_id, device_id)?
@@ -350,35 +309,29 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
))?;
add_unsigned_device_display_name(&mut keys, metadata)
.map_err(|_| {
Error::bad_database(
"invalid device keys in database",
)
})?;
.map_err(|_| Error::bad_database("invalid device keys in database"))?;
container.insert(device_id.to_owned(), keys);
}
device_keys.insert(user_id.to_owned(), container);
}
}
if let Some(master_key) = services().users.get_master_key(
sender_user,
user_id,
&allowed_signatures,
)? {
if let Some(master_key) =
services()
.users
.get_master_key(sender_user, user_id, &allowed_signatures)?
{
master_keys.insert(user_id.to_owned(), master_key);
}
if let Some(self_signing_key) = services().users.get_self_signing_key(
sender_user,
user_id,
&allowed_signatures,
)? {
if let Some(self_signing_key) =
services()
.users
.get_self_signing_key(sender_user, user_id, &allowed_signatures)?
{
self_signing_keys.insert(user_id.to_owned(), self_signing_key);
}
if Some(user_id) == sender_user {
if let Some(user_signing_key) =
services().users.get_user_signing_key(user_id)?
{
if let Some(user_signing_key) = services().users.get_user_signing_key(user_id)? {
user_signing_keys.insert(user_id.to_owned(), user_signing_key);
}
}
@@ -386,167 +339,123 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
let mut failures = BTreeMap::new();
let back_off = |id| async {
match services()
.globals
.bad_query_ratelimiter
.write()
.await
.entry(id)
{
hash_map::Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
}
hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
}
};
let mut futures: FuturesUnordered<_> = get_over_federation
.into_iter()
.map(|(server, keys)| async move {
(server, request_keys_from(server, keys).await)
.map(|(server, vec)| async move {
if let Some((time, tries)) = services()
.globals
.bad_query_ratelimiter
.read()
.await
.get(server)
{
// Exponential backoff
let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries);
if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
}
if time.elapsed() < min_elapsed_duration {
debug!("Backing off query from {:?}", server);
return (
server,
Err(Error::BadServerResponse("bad query, still backing off")),
);
}
}
let mut device_keys_input_fed = BTreeMap::new();
for (user_id, keys) in vec {
device_keys_input_fed.insert(user_id.to_owned(), keys.clone());
}
(
server,
tokio::time::timeout(
Duration::from_secs(25),
services().sending.send_federation_request(
server,
federation::keys::get_keys::v1::Request {
device_keys: device_keys_input_fed,
},
),
)
.await
.map_err(|_e| Error::BadServerResponse("Query took too long")),
)
})
.collect();
while let Some((server, response)) = futures.next().await {
let Ok(response) = response else {
failures.insert(server.to_string(), json!({}));
continue;
};
match response {
Ok(Ok(response)) => {
for (user, masterkey) in response.master_keys {
let (master_key_id, mut master_key) =
services().users.parse_master_key(&user, &masterkey)?;
for (user, masterkey) in response.master_keys {
let (master_key_id, mut master_key) =
services().users.parse_master_key(&user, &masterkey)?;
if let Some(our_master_key) = services().users.get_key(
&master_key_id,
sender_user,
&user,
&allowed_signatures,
)? {
let (_, our_master_key) = services()
.users
.parse_master_key(&user, &our_master_key)?;
for (entity, v) in &*our_master_key.signatures {
for (key_identifier, value) in v {
master_key.signatures.insert_signature(
entity.clone(),
key_identifier.clone(),
value.clone(),
);
if let Some(our_master_key) = services().users.get_key(
&master_key_id,
sender_user,
&user,
&allowed_signatures,
)? {
let (_, our_master_key) =
services().users.parse_master_key(&user, &our_master_key)?;
master_key.signatures.extend(our_master_key.signatures);
}
let json = serde_json::to_value(master_key).expect("to_value always works");
let raw = serde_json::from_value(json).expect("Raw::from_value always works");
services().users.add_cross_signing_keys(
&user, &raw, &None, &None,
false, // Don't notify. A notification would trigger another key request resulting in an endless loop
)?;
master_keys.insert(user, raw);
}
}
let json = serde_json::to_value(master_key)
.expect("to_value always works");
let raw = serde_json::from_value(json)
.expect("Raw::from_value always works");
services().users.add_cross_signing_keys(
&user, &raw, None, None,
// Don't notify. A notification would trigger another key
// request resulting in an endless loop
false,
)?;
master_keys.insert(user, raw);
}
self_signing_keys.extend(response.self_signing_keys);
device_keys.extend(response.device_keys);
self_signing_keys.extend(response.self_signing_keys);
device_keys.extend(response.device_keys);
}
_ => {
back_off(server.to_owned()).await;
failures.insert(server.to_string(), json!({}));
}
}
}
Ok(get_keys::v3::Response {
failures,
device_keys,
master_keys,
self_signing_keys,
user_signing_keys,
device_keys,
failures,
})
}
/// Returns `Err` if key requests to the server are being backed off due to
/// previous errors.
async fn check_key_requests_back_off(server: &ServerName) -> Result<()> {
if let Some((time, tries)) =
services().globals.bad_query_ratelimiter.read().await.get(server)
{
// Exponential backoff
let mut min_elapsed_duration =
Duration::from_secs(30) * (*tries) * (*tries);
if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
}
if let Some(remaining) =
min_elapsed_duration.checked_sub(time.elapsed())
{
debug!(%server, %tries, ?remaining, "Backing off from server");
return Err(Error::BadServerResponse(
"bad query, still backing off",
));
}
}
Ok(())
}
/// Backs off future remote device key requests to a server after a failure.
async fn back_off_key_requests(server: OwnedServerName) {
match services().globals.bad_query_ratelimiter.write().await.entry(server) {
hash_map::Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
}
hash_map::Entry::Occupied(mut e) => {
*e.get_mut() = (Instant::now(), e.get().1 + 1);
}
}
}
/// Stops backing off remote device key requests to a server after a success.
async fn reset_key_request_back_off(server: &ServerName) {
services().globals.bad_query_ratelimiter.write().await.remove(server);
}
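The three helpers above implement a quadratic backoff: the wait window is 30 seconds times the square of the failure count, capped at one day, and a success clears the counter. A minimal, self-contained sketch of that window arithmetic (the 30-second base and 24-hour cap mirror the constants above; the bookkeeping around `Instant` is simplified for illustration):

use std::time::{Duration, Instant};

/// Quadratic backoff: 30s * tries^2, capped at 24 hours.
fn backoff_window(tries: u32) -> Duration {
    (Duration::from_secs(30) * tries * tries)
        .min(Duration::from_secs(60 * 60 * 24))
}

/// How much longer a caller should wait, or `None` if the window elapsed.
fn remaining_backoff(last_failure: Instant, tries: u32) -> Option<Duration> {
    backoff_window(tries).checked_sub(last_failure.elapsed())
}

fn main() {
    // Three consecutive failures: 30s * 9 = 270s.
    assert_eq!(backoff_window(3), Duration::from_secs(270));
    // Eventually the one-day cap dominates.
    assert_eq!(backoff_window(10_000), Duration::from_secs(60 * 60 * 24));
    assert!(remaining_backoff(Instant::now(), 1).is_some());
}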
/// Requests device keys from a remote server, unless the server is in backoff.
///
/// Updates backoff state depending on the result of the request.
async fn request_keys_from(
server: &ServerName,
keys: Vec<(&UserId, &Vec<OwnedDeviceId>)>,
) -> Result<federation::keys::get_keys::v1::Response> {
check_key_requests_back_off(server).await?;
let result = request_keys_from_inner(server, keys).await;
match &result {
Ok(_) => reset_key_request_back_off(server).await,
Err(error) => {
debug!(%server, %error, "remote device key query failed");
back_off_key_requests(server.to_owned()).await;
}
}
result
}
async fn request_keys_from_inner(
server: &ServerName,
keys: Vec<(&UserId, &Vec<OwnedDeviceId>)>,
) -> Result<federation::keys::get_keys::v1::Response> {
let mut device_keys_input_fed = BTreeMap::new();
for (user_id, keys) in keys {
device_keys_input_fed.insert(user_id.to_owned(), keys.clone());
}
// TODO: switch .and_then(|result| result) to .flatten() when stable
// <https://github.com/rust-lang/rust/issues/70142>
tokio::time::timeout(
Duration::from_secs(25),
services().sending.send_federation_request(
server,
federation::keys::get_keys::v1::Request {
device_keys: device_keys_input_fed,
},
),
)
.await
.map_err(|_e| Error::BadServerResponse("Query took too long"))
.and_then(|result| result)
}
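The `timeout` wrapper above yields a nested `Result` (the timeout error on the outside, the request error on the inside); once `map_err` unifies the error type, `.and_then(|result| result)` collapses the nesting, which is exactly what the not-yet-stable `Result::flatten` will do. A tiny illustration of that pattern, independent of the federation types:

fn flatten<T, E>(nested: Result<Result<T, E>, E>) -> Result<T, E> {
    // Equivalent to the unstable `Result::flatten`.
    nested.and_then(|inner| inner)
}

fn main() {
    assert_eq!(flatten::<_, ()>(Ok(Ok(1))), Ok(1));
    assert_eq!(flatten::<i32, _>(Ok(Err("request failed"))), Err("request failed"));
    assert_eq!(flatten::<i32, _>(Err("timed out")), Err("timed out"));
}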
fn add_unsigned_device_display_name(
keys: &mut Raw<ruma::encryption::DeviceKeys>,
metadata: ruma::api::client::device::Device,
) -> serde_json::Result<()> {
if let Some(display_name) = metadata.display_name {
let mut object = keys
.deserialize_as::<serde_json::Map<String, serde_json::Value>>()?;
let mut object = keys.deserialize_as::<serde_json::Map<String, serde_json::Value>>()?;
let unsigned = object.entry("unsigned").or_insert_with(|| json!({}));
if let serde_json::Value::Object(unsigned_object) = unsigned {
unsigned_object
.insert("device_display_name".to_owned(), display_name.into());
unsigned_object.insert("device_display_name".to_owned(), display_name.into());
}
*keys = Raw::from_json(serde_json::value::to_raw_value(&object)?);
@ -556,10 +465,7 @@ fn add_unsigned_device_display_name(
}
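`add_unsigned_device_display_name` round-trips the device keys through an untyped JSON map so it can graft the display name into the `unsigned` object without a dedicated struct. The same surgery on a plain `serde_json::Value` (the input shape here is illustrative, not the full `DeviceKeys` schema):

use serde_json::{json, Value};

fn add_display_name(keys: &mut Value, display_name: &str) {
    if let Value::Object(object) = keys {
        // Create `unsigned` if it is missing, then insert the name into it.
        let unsigned = object.entry("unsigned").or_insert_with(|| json!({}));
        if let Value::Object(unsigned_object) = unsigned {
            unsigned_object
                .insert("device_display_name".to_owned(), display_name.into());
        }
    }
}

fn main() {
    let mut keys = json!({ "device_id": "ABCDEFGHIJ", "keys": {} });
    add_display_name(&mut keys, "Alice's phone");
    assert_eq!(keys["unsigned"]["device_display_name"], "Alice's phone");
}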
pub(crate) async fn claim_keys_helper(
one_time_keys_input: &BTreeMap<
OwnedUserId,
BTreeMap<OwnedDeviceId, OneTimeKeyAlgorithm>,
>,
one_time_keys_input: &BTreeMap<OwnedUserId, BTreeMap<OwnedDeviceId, DeviceKeyAlgorithm>>,
) -> Result<claim_keys::v3::Response> {
let mut one_time_keys = BTreeMap::new();
@ -575,11 +481,11 @@ pub(crate) async fn claim_keys_helper(
let mut container = BTreeMap::new();
for (device_id, key_algorithm) in map {
if let Some(one_time_keys) = services().users.take_one_time_key(
user_id,
device_id,
key_algorithm,
)? {
if let Some(one_time_keys) =
services()
.users
.take_one_time_key(user_id, device_id, key_algorithm)?
{
let mut c = BTreeMap::new();
c.insert(one_time_keys.0, one_time_keys.1);
container.insert(device_id.clone(), c);
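The response being assembled here nests three maps: user ID → device ID → (key ID → one-time key). A toy version of that shape, with plain strings standing in for the ruma identifier and key types:

use std::collections::BTreeMap;

type OneTimeKeys = BTreeMap<String, BTreeMap<String, BTreeMap<String, String>>>;

fn main() {
    let mut keys = BTreeMap::new();
    keys.insert("signed_curve25519:AAAAAA".to_owned(), "<key material>".to_owned());
    let mut container = BTreeMap::new();
    container.insert("DEVICEID".to_owned(), keys);
    let mut one_time_keys = OneTimeKeys::new();
    one_time_keys.insert("@alice:example.com".to_owned(), container);
    assert_eq!(one_time_keys["@alice:example.com"]["DEVICEID"].len(), 1);
}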

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -1,82 +1,77 @@
use std::collections::{BTreeMap, HashSet};
use crate::{
service::{pdu::PduBuilder, rooms::timeline::PduCount},
services, utils, Error, Result, Ruma,
};
use ruma::{
api::client::{
error::ErrorKind,
message::{get_message_events, send_message_event},
},
events::{StateEventType, TimelineEventType},
uint,
};
use crate::{
service::{pdu::PduBuilder, rooms::timeline::PduCount},
services, utils, Ar, Error, Ra, Result,
use std::{
collections::{BTreeMap, HashSet},
sync::Arc,
};
/// # `PUT /_matrix/client/r0/rooms/{roomId}/send/{eventType}/{txnId}`
///
/// Send a message event into the room.
///
/// - Is a NOOP if the txn id was already used before and returns the same event
/// id again
/// - Is a NOOP if the txn id was already used before and returns the same event id again
/// - The only requirement for the content is that it has to be valid JSON
/// - Tries to send the event into the room, auth rules will determine if it is
/// allowed
pub(crate) async fn send_message_event_route(
body: Ar<send_message_event::v3::Request>,
) -> Result<Ra<send_message_event::v3::Response>> {
/// - Tries to send the event into the room, auth rules will determine if it is allowed
pub async fn send_message_event_route(
body: Ruma<send_message_event::v3::Request>,
) -> Result<send_message_event::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.as_deref();
let room_token = services()
.globals
.roomid_mutex_state
.lock_key(body.room_id.clone())
.await;
let mutex_state = Arc::clone(
services()
.globals
.roomid_mutex_state
.write()
.await
.entry(body.room_id.clone())
.or_default(),
);
let state_lock = mutex_state.lock().await;
// Forbid m.room.encrypted if encryption is disabled
if TimelineEventType::RoomEncrypted == body.event_type.to_string().into()
&& !services().globals.allow_encryption()
{
return Err(Error::BadRequest(
ErrorKind::forbidden(),
ErrorKind::Forbidden,
"Encryption has been disabled",
));
}
// Check if this is a new transaction id
if let Some(response) = services().transaction_ids.existing_txnid(
sender_user,
sender_device,
&body.txn_id,
)? {
if let Some(response) =
services()
.transaction_ids
.existing_txnid(sender_user, sender_device, &body.txn_id)?
{
// The client might have sent a txnid of the /sendToDevice endpoint
// This txnid has no response associated with it
if response.is_empty() {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Tried to use txn id already used for an incompatible \
endpoint.",
"Tried to use txn id already used for an incompatible endpoint.",
));
}
let event_id = utils::string_from_bytes(&response)
.map_err(|_| {
Error::bad_database("Invalid txnid bytes in database.")
})?
.map_err(|_| Error::bad_database("Invalid txnid bytes in database."))?
.try_into()
.map_err(|_| {
Error::bad_database("Invalid event id in txnid data.")
})?;
return Ok(Ra(send_message_event::v3::Response {
event_id,
}));
.map_err(|_| Error::bad_database("Invalid event id in txnid data."))?;
return Ok(send_message_event::v3::Response { event_id });
}
let mut unsigned = BTreeMap::new();
unsigned
.insert("transaction_id".to_owned(), body.txn_id.to_string().into());
unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into());
let event_id = services()
.rooms
@ -85,18 +80,14 @@ pub(crate) async fn send_message_event_route(
PduBuilder {
event_type: body.event_type.to_string().into(),
content: serde_json::from_str(body.body.body.json().get())
.map_err(|_| {
Error::BadRequest(
ErrorKind::BadJson,
"Invalid JSON body.",
)
})?,
.map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?,
unsigned: Some(unsigned),
state_key: None,
redacts: None,
},
sender_user,
&room_token,
&body.room_id,
&state_lock,
)
.await?;
@ -107,51 +98,45 @@ pub(crate) async fn send_message_event_route(
event_id.as_bytes(),
)?;
drop(room_token);
drop(state_lock);
Ok(Ra(send_message_event::v3::Response::new((*event_id).to_owned())))
Ok(send_message_event::v3::Response::new(
(*event_id).to_owned(),
))
}
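The transaction-id check above is what makes retries of `PUT .../send/{eventType}/{txnId}` idempotent: the first successful send records the event ID under (user, device, txn id), and any replay returns the stored ID instead of creating a duplicate event. A minimal in-memory sketch of that bookkeeping (a `HashMap` stands in for the server's persistent transaction-id table):

use std::collections::HashMap;

type TxnKey = (String, String, String); // (user_id, device_id, txn_id)

#[derive(Default)]
struct TxnIds {
    store: HashMap<TxnKey, String>,
}

impl TxnIds {
    /// Returns the previously recorded event ID if this txn id was seen before.
    fn existing(&self, key: &TxnKey) -> Option<&String> {
        self.store.get(key)
    }

    fn record(&mut self, key: TxnKey, event_id: String) {
        self.store.insert(key, event_id);
    }
}

fn main() {
    let mut txns = TxnIds::default();
    let key = (
        "@alice:example.com".to_owned(),
        "DEVICEID".to_owned(),
        "txn-1".to_owned(),
    );
    assert!(txns.existing(&key).is_none());
    txns.record(key.clone(), "$event:example.com".to_owned());
    // A retry with the same txn id is a no-op that returns the same event id.
    assert_eq!(txns.existing(&key).map(String::as_str), Some("$event:example.com"));
}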
/// # `GET /_matrix/client/r0/rooms/{roomId}/messages`
///
/// Allows paginating through room history.
///
/// - Only works if the user is joined (TODO: always allow, but only show events
/// where the user was joined, depending on `history_visibility`)
#[allow(clippy::too_many_lines)]
pub(crate) async fn get_message_events_route(
body: Ar<get_message_events::v3::Request>,
) -> Result<Ra<get_message_events::v3::Response>> {
/// - Only works if the user is joined (TODO: always allow, but only show events where the user was
/// joined, depending on history_visibility)
pub async fn get_message_events_route(
body: Ruma<get_message_events::v3::Request>,
) -> Result<get_message_events::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device =
body.sender_device.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
let from = match body.from.clone() {
Some(from) => PduCount::try_from_string(&from)?,
None => match body.dir {
ruma::api::Direction::Forward => PduCount::MIN,
ruma::api::Direction::Backward => PduCount::MAX,
ruma::api::Direction::Forward => PduCount::min(),
ruma::api::Direction::Backward => PduCount::max(),
},
};
let to = body.to.as_ref().and_then(|t| PduCount::try_from_string(t).ok());
let to = body
.to
.as_ref()
.and_then(|t| PduCount::try_from_string(t).ok());
services()
.rooms
.lazy_loading
.lazy_load_confirm_delivery(
sender_user,
sender_device,
&body.room_id,
from,
)
.lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from)
.await?;
let limit = body
.limit
.min(uint!(100))
.try_into()
.expect("0-100 should fit in usize");
let limit = u64::from(body.limit).min(100) as usize;
let next_token;
@ -166,27 +151,21 @@ pub(crate) async fn get_message_events_route(
.timeline
.pdus_after(sender_user, &body.room_id, from)?
.take(limit)
.filter_map(Result::ok)
.filter_map(|r| r.ok()) // Filter out buggy events
.filter(|(_, pdu)| {
services()
.rooms
.state_accessor
.user_can_see_event(
sender_user,
&body.room_id,
&pdu.event_id,
)
.user_can_see_event(sender_user, &body.room_id, &pdu.event_id)
.unwrap_or(false)
})
.take_while(|&(k, _)| Some(k) != to)
.take_while(|&(k, _)| Some(k) != to) // Stop at `to`
.collect();
for (_, event) in &events_after {
// * https://github.com/vector-im/element-android/issues/3417
// * https://github.com/vector-im/element-web/issues/21034
//
// TODO: When the above issues are resolved, uncomment this:
/*
/* TODO: Remove this when these are resolved:
* https://github.com/vector-im/element-android/issues/3417
* https://github.com/vector-im/element-web/issues/21034
if !services().rooms.lazy_loading.lazy_load_was_sent_before(
sender_user,
sender_device,
@ -196,7 +175,6 @@ pub(crate) async fn get_message_events_route(
lazy_loaded.insert(event.sender.clone());
}
*/
// And delete this line:
lazy_loaded.insert(event.sender.clone());
}
@ -222,27 +200,21 @@ pub(crate) async fn get_message_events_route(
.timeline
.pdus_until(sender_user, &body.room_id, from)?
.take(limit)
.filter_map(Result::ok)
.filter_map(|r| r.ok()) // Filter out buggy events
.filter(|(_, pdu)| {
services()
.rooms
.state_accessor
.user_can_see_event(
sender_user,
&body.room_id,
&pdu.event_id,
)
.user_can_see_event(sender_user, &body.room_id, &pdu.event_id)
.unwrap_or(false)
})
.take_while(|&(k, _)| Some(k) != to)
.take_while(|&(k, _)| Some(k) != to) // Stop at `to`
.collect();
for (_, event) in &events_before {
// * https://github.com/vector-im/element-android/issues/3417
// * https://github.com/vector-im/element-web/issues/21034
//
// TODO: When the above issues are resolved, uncomment this:
/*
/* TODO: Remove this when these are resolved:
* https://github.com/vector-im/element-android/issues/3417
* https://github.com/vector-im/element-web/issues/21034
if !services().rooms.lazy_loading.lazy_load_was_sent_before(
sender_user,
sender_device,
@ -252,7 +224,6 @@ pub(crate) async fn get_message_events_route(
lazy_loaded.insert(event.sender.clone());
}
*/
// And delete this line:
lazy_loaded.insert(event.sender.clone());
}
@ -271,21 +242,16 @@ pub(crate) async fn get_message_events_route(
resp.state = Vec::new();
for ll_id in &lazy_loaded {
if let Some(member_event) =
services().rooms.state_accessor.room_state_get(
&body.room_id,
&StateEventType::RoomMember,
ll_id.as_str(),
)?
{
if let Some(member_event) = services().rooms.state_accessor.room_state_get(
&body.room_id,
&StateEventType::RoomMember,
ll_id.as_str(),
)? {
resp.state.push(member_event.to_state_event());
}
}
// * https://github.com/vector-im/element-android/issues/3417
// * https://github.com/vector-im/element-web/issues/21034
//
// TODO: When the above issues are resolved, uncomment this:
// TODO: enable again when we are sure clients can handle it
/*
if let Some(next_token) = next_token {
services().rooms.lazy_loading.lazy_load_mark_sent(
@ -298,5 +264,5 @@ pub(crate) async fn get_message_events_route(
}
*/
Ok(Ra(resp))
Ok(resp)
}
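Pagination above starts from a client-supplied token or, absent one, from the extreme count for the requested direction: the smallest count for forward pagination, the largest for backward. A tiny sketch of that default selection, with `PduCount` simplified to a bare `u64`:

enum Direction {
    Forward,
    Backward,
}

/// Starting pagination count when the client supplies no `from` token.
fn default_from(dir: Direction) -> u64 {
    match dir {
        Direction::Forward => u64::MIN,
        Direction::Backward => u64::MAX,
    }
}

fn main() {
    assert_eq!(default_from(Direction::Forward), 0);
    assert_eq!(default_from(Direction::Backward), u64::MAX);
}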

View file

@ -0,0 +1,74 @@
mod account;
mod alias;
mod backup;
mod capabilities;
mod config;
mod context;
mod device;
mod directory;
mod filter;
mod keys;
mod media;
mod membership;
mod message;
mod presence;
mod profile;
mod push;
mod read_marker;
mod redact;
mod relations;
mod report;
mod room;
mod search;
mod session;
mod space;
mod state;
mod sync;
mod tag;
mod thirdparty;
mod threads;
mod to_device;
mod typing;
mod unversioned;
mod user_directory;
mod voip;
pub use account::*;
pub use alias::*;
pub use backup::*;
pub use capabilities::*;
pub use config::*;
pub use context::*;
pub use device::*;
pub use directory::*;
pub use filter::*;
pub use keys::*;
pub use media::*;
pub use membership::*;
pub use message::*;
pub use presence::*;
pub use profile::*;
pub use push::*;
pub use read_marker::*;
pub use redact::*;
pub use relations::*;
pub use report::*;
pub use room::*;
pub use search::*;
pub use session::*;
pub use space::*;
pub use state::*;
pub use sync::*;
pub use tag::*;
pub use thirdparty::*;
pub use threads::*;
pub use to_device::*;
pub use typing::*;
pub use unversioned::*;
pub use user_directory::*;
pub use voip::*;
pub const DEVICE_ID_LENGTH: usize = 10;
pub const TOKEN_LENGTH: usize = 32;
pub const SESSION_ID_LENGTH: usize = 32;
pub const AUTO_GEN_PASSWORD_LENGTH: usize = 15;
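These length constants feed the server's random-identifier generation (device IDs, access tokens, UIA session IDs, auto-generated passwords). A hedged sketch of how such a generator might look, assuming the `rand` crate (0.8); the codebase's own helper (a `utils::random_string`-style function) may differ in detail:

use rand::{distributions::Alphanumeric, thread_rng, Rng};

const DEVICE_ID_LENGTH: usize = 10;
const TOKEN_LENGTH: usize = 32;

/// Illustrative stand-in for a random-string helper.
fn random_string(length: usize) -> String {
    thread_rng()
        .sample_iter(&Alphanumeric)
        .take(length)
        .map(char::from)
        .collect()
}

fn main() {
    let device_id = random_string(DEVICE_ID_LENGTH);
    let token = random_string(TOKEN_LENGTH);
    assert_eq!(device_id.len(), DEVICE_ID_LENGTH);
    assert_eq!(token.len(), TOKEN_LENGTH);
}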

View file

@ -0,0 +1,90 @@
use crate::{services, utils, Error, Result, Ruma};
use ruma::api::client::{
error::ErrorKind,
presence::{get_presence, set_presence},
};
use std::time::Duration;
/// # `PUT /_matrix/client/r0/presence/{userId}/status`
///
/// Sets the presence state of the sender user.
pub async fn set_presence_route(
body: Ruma<set_presence::v3::Request>,
) -> Result<set_presence::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
for room_id in services().rooms.state_cache.rooms_joined(sender_user) {
let room_id = room_id?;
services().rooms.edus.presence.update_presence(
sender_user,
&room_id,
ruma::events::presence::PresenceEvent {
content: ruma::events::presence::PresenceEventContent {
avatar_url: services().users.avatar_url(sender_user)?,
currently_active: None,
displayname: services().users.displayname(sender_user)?,
last_active_ago: Some(
utils::millis_since_unix_epoch()
.try_into()
.expect("time is valid"),
),
presence: body.presence.clone(),
status_msg: body.status_msg.clone(),
},
sender: sender_user.clone(),
},
)?;
}
Ok(set_presence::v3::Response {})
}
/// # `GET /_matrix/client/r0/presence/{userId}/status`
///
/// Gets the presence state of the given user.
///
/// - Only works if you share a room with the user
pub async fn get_presence_route(
body: Ruma<get_presence::v3::Request>,
) -> Result<get_presence::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let mut presence_event = None;
for room_id in services()
.rooms
.user
.get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()])?
{
let room_id = room_id?;
if let Some(presence) = services()
.rooms
.edus
.presence
.get_last_presence_event(sender_user, &room_id)?
{
presence_event = Some(presence);
break;
}
}
if let Some(presence) = presence_event {
Ok(get_presence::v3::Response {
// TODO: Should ruma just use the PresenceEventContent type here?
status_msg: presence.content.status_msg,
currently_active: presence.content.currently_active,
last_active_ago: presence
.content
.last_active_ago
.map(|millis| Duration::from_millis(millis.into())),
presence: presence.content.presence,
})
} else {
Err(Error::BadRequest(
ErrorKind::NotFound,
"Presence state for this user was not found",
))
}
}
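`last_active_ago` is stored as milliseconds and surfaced to clients as a `Duration`. A small sketch of one way to derive that gap from a stored activity timestamp (simplified relative to the actual presence bookkeeping; the epoch-millis helper below is an illustrative stand-in):

use std::time::{Duration, SystemTime, UNIX_EPOCH};

/// Illustrative stand-in for a millis-since-epoch helper.
fn millis_since_unix_epoch() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("time is valid")
        .as_millis() as u64
}

/// Age of a stored activity timestamp, as the `Duration` a client receives.
fn last_active_ago(last_active_millis: u64) -> Duration {
    Duration::from_millis(millis_since_unix_epoch().saturating_sub(last_active_millis))
}

fn main() {
    let stored = millis_since_unix_epoch();
    // Read back immediately: the reported gap should be (near) zero.
    assert!(last_active_ago(stored) < Duration::from_secs(1));
}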

View file

@ -1,41 +1,39 @@
use crate::{service::pdu::PduBuilder, services, utils, Error, Result, Ruma};
use ruma::{
api::{
client::{
error::ErrorKind,
profile::{
get_avatar_url, get_display_name, get_profile, set_avatar_url,
set_display_name,
get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name,
},
},
federation::{self, query::get_profile_information::v1::ProfileField},
},
events::{
room::member::RoomMemberEventContent, StateEventType, TimelineEventType,
},
events::{room::member::RoomMemberEventContent, StateEventType, TimelineEventType},
};
use serde_json::value::to_raw_value;
use tracing::warn;
use crate::{service::pdu::PduBuilder, services, Ar, Error, Ra, Result};
use std::sync::Arc;
/// # `PUT /_matrix/client/r0/profile/{userId}/displayname`
///
/// Updates the displayname.
///
/// - Also makes sure other users receive the update using presence EDUs
pub(crate) async fn set_displayname_route(
body: Ar<set_display_name::v3::Request>,
) -> Result<Ra<set_display_name::v3::Response>> {
pub async fn set_displayname_route(
body: Ruma<set_display_name::v3::Request>,
) -> Result<set_display_name::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
services().users.set_displayname(sender_user, body.displayname.clone())?;
services()
.users
.set_displayname(sender_user, body.displayname.clone())?;
// Send a new membership event and presence update into all joined rooms
let all_rooms_joined: Vec<_> = services()
.rooms
.state_cache
.rooms_joined(sender_user)
.filter_map(Result::ok)
.filter_map(|r| r.ok())
.map(|room_id| {
Ok::<_, Error>((
PduBuilder {
@ -54,18 +52,14 @@ pub(crate) async fn set_displayname_route(
)?
.ok_or_else(|| {
Error::bad_database(
"Tried to send displayname update for \
user not in the room.",
"Tried to send displayname update for user not in the \
room.",
)
})?
.content
.get(),
)
.map_err(|_| {
Error::bad_database(
"Database contains invalid PDU.",
)
})?
.map_err(|_| Error::bad_database("Database contains invalid PDU."))?
})
.expect("event is valid, we just created it"),
unsigned: None,
@ -75,27 +69,50 @@ pub(crate) async fn set_displayname_route(
room_id,
))
})
.filter_map(Result::ok)
.filter_map(|r| r.ok())
.collect();
for (pdu_builder, room_id) in all_rooms_joined {
let room_token = services()
.globals
.roomid_mutex_state
.lock_key(room_id.clone())
.await;
let mutex_state = Arc::clone(
services()
.globals
.roomid_mutex_state
.write()
.await
.entry(room_id.clone())
.or_default(),
);
let state_lock = mutex_state.lock().await;
if let Err(error) = services()
let _ = services()
.rooms
.timeline
.build_and_append_pdu(pdu_builder, sender_user, &room_token)
.await
{
warn!(%error, "failed to add PDU");
}
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)
.await;
// Presence update
services().rooms.edus.presence.update_presence(
sender_user,
&room_id,
ruma::events::presence::PresenceEvent {
content: ruma::events::presence::PresenceEventContent {
avatar_url: services().users.avatar_url(sender_user)?,
currently_active: None,
displayname: services().users.displayname(sender_user)?,
last_active_ago: Some(
utils::millis_since_unix_epoch()
.try_into()
.expect("time is valid"),
),
presence: ruma::presence::PresenceState::Online,
status_msg: None,
},
sender: sender_user.clone(),
},
)?;
}
Ok(Ra(set_display_name::v3::Response {}))
Ok(set_display_name::v3::Response {})
}
/// # `GET /_matrix/client/r0/profile/{userId}/displayname`
@ -103,9 +120,9 @@ pub(crate) async fn set_displayname_route(
/// Returns the displayname of the user.
///
/// - If user is on another server: Fetches displayname over federation
pub(crate) async fn get_displayname_route(
body: Ar<get_display_name::v3::Request>,
) -> Result<Ra<get_display_name::v3::Response>> {
pub async fn get_displayname_route(
body: Ruma<get_display_name::v3::Request>,
) -> Result<get_display_name::v3::Response> {
if body.user_id.server_name() != services().globals.server_name() {
let response = services()
.sending
@ -118,36 +135,40 @@ pub(crate) async fn get_displayname_route(
)
.await?;
return Ok(Ra(get_display_name::v3::Response {
return Ok(get_display_name::v3::Response {
displayname: response.displayname,
}));
});
}
Ok(Ra(get_display_name::v3::Response {
Ok(get_display_name::v3::Response {
displayname: services().users.displayname(&body.user_id)?,
}))
})
}
/// # `PUT /_matrix/client/r0/profile/{userId}/avatar_url`
///
/// Updates the `avatar_url` and `blurhash`.
/// Updates the avatar_url and blurhash.
///
/// - Also makes sure other users receive the update using presence EDUs
pub(crate) async fn set_avatar_url_route(
body: Ar<set_avatar_url::v3::Request>,
) -> Result<Ra<set_avatar_url::v3::Response>> {
pub async fn set_avatar_url_route(
body: Ruma<set_avatar_url::v3::Request>,
) -> Result<set_avatar_url::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
services().users.set_avatar_url(sender_user, body.avatar_url.clone())?;
services()
.users
.set_avatar_url(sender_user, body.avatar_url.clone())?;
services().users.set_blurhash(sender_user, body.blurhash.clone())?;
services()
.users
.set_blurhash(sender_user, body.blurhash.clone())?;
// Send a new membership event and presence update into all joined rooms
let all_joined_rooms: Vec<_> = services()
.rooms
.state_cache
.rooms_joined(sender_user)
.filter_map(Result::ok)
.filter_map(|r| r.ok())
.map(|room_id| {
Ok::<_, Error>((
PduBuilder {
@ -166,18 +187,14 @@ pub(crate) async fn set_avatar_url_route(
)?
.ok_or_else(|| {
Error::bad_database(
"Tried to send displayname update for \
user not in the room.",
"Tried to send displayname update for user not in the \
room.",
)
})?
.content
.get(),
)
.map_err(|_| {
Error::bad_database(
"Database contains invalid PDU.",
)
})?
.map_err(|_| Error::bad_database("Database contains invalid PDU."))?
})
.expect("event is valid, we just created it"),
unsigned: None,
@ -187,38 +204,60 @@ pub(crate) async fn set_avatar_url_route(
room_id,
))
})
.filter_map(Result::ok)
.filter_map(|r| r.ok())
.collect();
for (pdu_builder, room_id) in all_joined_rooms {
let room_token = services()
.globals
.roomid_mutex_state
.lock_key(room_id.clone())
.await;
let mutex_state = Arc::clone(
services()
.globals
.roomid_mutex_state
.write()
.await
.entry(room_id.clone())
.or_default(),
);
let state_lock = mutex_state.lock().await;
if let Err(error) = services()
let _ = services()
.rooms
.timeline
.build_and_append_pdu(pdu_builder, sender_user, &room_token)
.await
{
warn!(%error, "failed to add PDU");
};
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)
.await;
// Presence update
services().rooms.edus.presence.update_presence(
sender_user,
&room_id,
ruma::events::presence::PresenceEvent {
content: ruma::events::presence::PresenceEventContent {
avatar_url: services().users.avatar_url(sender_user)?,
currently_active: None,
displayname: services().users.displayname(sender_user)?,
last_active_ago: Some(
utils::millis_since_unix_epoch()
.try_into()
.expect("time is valid"),
),
presence: ruma::presence::PresenceState::Online,
status_msg: None,
},
sender: sender_user.clone(),
},
)?;
}
Ok(Ra(set_avatar_url::v3::Response {}))
Ok(set_avatar_url::v3::Response {})
}
/// # `GET /_matrix/client/r0/profile/{userId}/avatar_url`
///
/// Returns the `avatar_url` and `blurhash` of the user.
/// Returns the avatar_url and blurhash of the user.
///
/// - If user is on another server: Fetches `avatar_url` and `blurhash` over
/// federation
pub(crate) async fn get_avatar_url_route(
body: Ar<get_avatar_url::v3::Request>,
) -> Result<Ra<get_avatar_url::v3::Response>> {
/// - If user is on another server: Fetches avatar_url and blurhash over federation
pub async fn get_avatar_url_route(
body: Ruma<get_avatar_url::v3::Request>,
) -> Result<get_avatar_url::v3::Response> {
if body.user_id.server_name() != services().globals.server_name() {
let response = services()
.sending
@ -231,26 +270,26 @@ pub(crate) async fn get_avatar_url_route(
)
.await?;
return Ok(Ra(get_avatar_url::v3::Response {
return Ok(get_avatar_url::v3::Response {
avatar_url: response.avatar_url,
blurhash: response.blurhash,
}));
});
}
Ok(Ra(get_avatar_url::v3::Response {
Ok(get_avatar_url::v3::Response {
avatar_url: services().users.avatar_url(&body.user_id)?,
blurhash: services().users.blurhash(&body.user_id)?,
}))
})
}
/// # `GET /_matrix/client/r0/profile/{userId}`
///
/// Returns the `displayname`, `avatar_url` and `blurhash` of the user.
/// Returns the displayname, avatar_url and blurhash of the user.
///
/// - If user is on another server: Fetches profile over federation
pub(crate) async fn get_profile_route(
body: Ar<get_profile::v3::Request>,
) -> Result<Ra<get_profile::v3::Response>> {
pub async fn get_profile_route(
body: Ruma<get_profile::v3::Request>,
) -> Result<get_profile::v3::Response> {
if body.user_id.server_name() != services().globals.server_name() {
let response = services()
.sending
@ -263,11 +302,11 @@ pub(crate) async fn get_profile_route(
)
.await?;
return Ok(Ra(get_profile::v3::Response {
return Ok(get_profile::v3::Response {
displayname: response.displayname,
avatar_url: response.avatar_url,
blurhash: response.blurhash,
}));
});
}
if !services().users.exists(&body.user_id)? {
@ -278,9 +317,9 @@ pub(crate) async fn get_profile_route(
));
}
Ok(Ra(get_profile::v3::Response {
Ok(get_profile::v3::Response {
avatar_url: services().users.avatar_url(&body.user_id)?,
blurhash: services().users.blurhash(&body.user_id)?,
displayname: services().users.displayname(&body.user_id)?,
}))
})
}

View file

@ -1,63 +1,69 @@
use crate::{services, Error, Result, Ruma};
use ruma::{
api::client::{
error::ErrorKind,
push::{
delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions,
get_pushrule_enabled, get_pushrules_all, set_pusher, set_pushrule,
set_pushrule_actions, set_pushrule_enabled,
delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, get_pushrule_enabled,
get_pushrules_all, set_pusher, set_pushrule, set_pushrule_actions,
set_pushrule_enabled, RuleScope,
},
},
events::push_rules::PushRulesEventContent,
push::{AnyPushRuleRef, InsertPushRuleError, RemovePushRuleError},
serde::Raw,
events::{push_rules::PushRulesEvent, GlobalAccountDataEventType},
push::{InsertPushRuleError, RemovePushRuleError},
};
use crate::{services, Ar, Error, Ra, Result};
/// # `GET /_matrix/client/r0/pushrules`
///
/// Retrieves the push rules event for this user.
pub(crate) async fn get_pushrules_all_route(
body: Ar<get_pushrules_all::v3::Request>,
) -> Result<Ra<get_pushrules_all::v3::Response>> {
pub async fn get_pushrules_all_route(
body: Ruma<get_pushrules_all::v3::Request>,
) -> Result<get_pushrules_all::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let event = services()
.account_data
.get_global::<PushRulesEventContent>(sender_user)?
.get(
None,
sender_user,
GlobalAccountDataEventType::PushRules.to_string().into(),
)?
.ok_or(Error::BadRequest(
ErrorKind::NotFound,
"PushRules event not found.",
))?;
let account_data = event.deserialize().map_err(|_| {
Error::bad_database("Invalid account data event in db.")
})?;
let account_data = serde_json::from_str::<PushRulesEvent>(event.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))?
.content;
Ok(Ra(get_pushrules_all::v3::Response {
Ok(get_pushrules_all::v3::Response {
global: account_data.global,
}))
})
}
/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
///
/// Retrieves a single specified push rule for this user.
pub(crate) async fn get_pushrule_route(
body: Ar<get_pushrule::v3::Request>,
) -> Result<Ra<get_pushrule::v3::Response>> {
pub async fn get_pushrule_route(
body: Ruma<get_pushrule::v3::Request>,
) -> Result<get_pushrule::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let event = services()
.account_data
.get_global::<PushRulesEventContent>(sender_user)?
.get(
None,
sender_user,
GlobalAccountDataEventType::PushRules.to_string().into(),
)?
.ok_or(Error::BadRequest(
ErrorKind::NotFound,
"PushRules event not found.",
))?;
let account_data = event.deserialize().map_err(|_| {
Error::bad_database("Invalid account data event in db.")
})?;
let account_data = serde_json::from_str::<PushRulesEvent>(event.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))?
.content;
let rule = account_data
.global
@ -65,36 +71,47 @@ pub(crate) async fn get_pushrule_route(
.map(Into::into);
if let Some(rule) = rule {
Ok(Ra(get_pushrule::v3::Response {
rule,
}))
Ok(get_pushrule::v3::Response { rule })
} else {
Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found."))
Err(Error::BadRequest(
ErrorKind::NotFound,
"Push rule not found.",
))
}
}
/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
///
/// Creates a single specified push rule for this user.
pub(crate) async fn set_pushrule_route(
body: Ar<set_pushrule::v3::Request>,
) -> Result<Ra<set_pushrule::v3::Response>> {
pub async fn set_pushrule_route(
body: Ruma<set_pushrule::v3::Request>,
) -> Result<set_pushrule::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let body = body.body;
if body.scope != RuleScope::Global {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Scopes other than 'global' are not supported.",
));
}
let event = services()
.account_data
.get_global::<PushRulesEventContent>(sender_user)?
.get(
None,
sender_user,
GlobalAccountDataEventType::PushRules.to_string().into(),
)?
.ok_or(Error::BadRequest(
ErrorKind::NotFound,
"PushRules event not found.",
))?;
let mut account_data = event.deserialize().map_err(|_| {
Error::bad_database("Invalid account data event in db.")
})?;
let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))?;
if let Err(error) = account_data.global.insert(
if let Err(error) = account_data.content.global.insert(
body.rule.clone(),
body.after.as_deref(),
body.before.as_deref(),
@ -102,20 +119,16 @@ pub(crate) async fn set_pushrule_route(
let err = match error {
InsertPushRuleError::ServerDefaultRuleId => Error::BadRequest(
ErrorKind::InvalidParam,
"Rule IDs starting with a dot are reserved for server-default \
rules.",
"Rule IDs starting with a dot are reserved for server-default rules.",
),
InsertPushRuleError::InvalidRuleId => Error::BadRequest(
ErrorKind::InvalidParam,
"Rule ID containing invalid characters.",
),
InsertPushRuleError::RelativeToServerDefaultRule => {
Error::BadRequest(
ErrorKind::InvalidParam,
"Can't place a push rule relatively to a server-default \
rule.",
)
}
InsertPushRuleError::RelativeToServerDefaultRule => Error::BadRequest(
ErrorKind::InvalidParam,
"Can't place a push rule relatively to a server-default rule.",
),
InsertPushRuleError::UnknownRuleId => Error::BadRequest(
ErrorKind::NotFound,
"The before or after rule could not be found.",
@ -130,34 +143,46 @@ pub(crate) async fn set_pushrule_route(
return Err(err);
}
services().account_data.update_global(
services().account_data.update(
None,
sender_user,
&Raw::new(&account_data)
.expect("json event serialization should always succeed"),
GlobalAccountDataEventType::PushRules.to_string().into(),
&serde_json::to_value(account_data).expect("to json value always works"),
)?;
Ok(Ra(set_pushrule::v3::Response {}))
Ok(set_pushrule::v3::Response {})
}
/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions`
///
/// Gets the actions of a single specified push rule for this user.
pub(crate) async fn get_pushrule_actions_route(
body: Ar<get_pushrule_actions::v3::Request>,
) -> Result<Ra<get_pushrule_actions::v3::Response>> {
pub async fn get_pushrule_actions_route(
body: Ruma<get_pushrule_actions::v3::Request>,
) -> Result<get_pushrule_actions::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if body.scope != RuleScope::Global {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Scopes other than 'global' are not supported.",
));
}
let event = services()
.account_data
.get_global::<PushRulesEventContent>(sender_user)?
.get(
None,
sender_user,
GlobalAccountDataEventType::PushRules.to_string().into(),
)?
.ok_or(Error::BadRequest(
ErrorKind::NotFound,
"PushRules event not found.",
))?;
let account_data = event.deserialize().map_err(|_| {
Error::bad_database("Invalid account data event in db.")
})?;
let account_data = serde_json::from_str::<PushRulesEvent>(event.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))?
.content;
let global = account_data.global;
let actions = global
@ -168,32 +193,41 @@ pub(crate) async fn get_pushrule_actions_route(
"Push rule not found.",
))?;
Ok(Ra(get_pushrule_actions::v3::Response {
actions,
}))
Ok(get_pushrule_actions::v3::Response { actions })
}
/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions`
///
/// Sets the actions of a single specified push rule for this user.
pub(crate) async fn set_pushrule_actions_route(
body: Ar<set_pushrule_actions::v3::Request>,
) -> Result<Ra<set_pushrule_actions::v3::Response>> {
pub async fn set_pushrule_actions_route(
body: Ruma<set_pushrule_actions::v3::Request>,
) -> Result<set_pushrule_actions::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if body.scope != RuleScope::Global {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Scopes other than 'global' are not supported.",
));
}
let event = services()
.account_data
.get_global::<PushRulesEventContent>(sender_user)?
.get(
None,
sender_user,
GlobalAccountDataEventType::PushRules.to_string().into(),
)?
.ok_or(Error::BadRequest(
ErrorKind::NotFound,
"PushRules event not found.",
))?;
let mut account_data = event.deserialize().map_err(|_| {
Error::bad_database("Invalid account data event in db.")
})?;
let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))?;
if account_data
.content
.global
.set_actions(body.kind.clone(), &body.rule_id, body.actions.clone())
.is_err()
@ -204,70 +238,90 @@ pub(crate) async fn set_pushrule_actions_route(
));
}
services().account_data.update_global(
services().account_data.update(
None,
sender_user,
&Raw::new(&account_data)
.expect("json event serialization should always suceed"),
GlobalAccountDataEventType::PushRules.to_string().into(),
&serde_json::to_value(account_data).expect("to json value always works"),
)?;
Ok(Ra(set_pushrule_actions::v3::Response {}))
Ok(set_pushrule_actions::v3::Response {})
}
/// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled`
///
/// Gets the enabled status of a single specified push rule for this user.
pub(crate) async fn get_pushrule_enabled_route(
body: Ar<get_pushrule_enabled::v3::Request>,
) -> Result<Ra<get_pushrule_enabled::v3::Response>> {
pub async fn get_pushrule_enabled_route(
body: Ruma<get_pushrule_enabled::v3::Request>,
) -> Result<get_pushrule_enabled::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if body.scope != RuleScope::Global {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Scopes other than 'global' are not supported.",
));
}
let event = services()
.account_data
.get_global::<PushRulesEventContent>(sender_user)?
.get(
None,
sender_user,
GlobalAccountDataEventType::PushRules.to_string().into(),
)?
.ok_or(Error::BadRequest(
ErrorKind::NotFound,
"PushRules event not found.",
))?;
let account_data = event.deserialize().map_err(|_| {
Error::bad_database("Invalid account data event in db.")
})?;
let account_data = serde_json::from_str::<PushRulesEvent>(event.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))?;
let global = account_data.global;
let global = account_data.content.global;
let enabled = global
.get(body.kind.clone(), &body.rule_id)
.map(AnyPushRuleRef::enabled)
.map(|r| r.enabled())
.ok_or(Error::BadRequest(
ErrorKind::NotFound,
"Push rule not found.",
))?;
Ok(Ra(get_pushrule_enabled::v3::Response {
enabled,
}))
Ok(get_pushrule_enabled::v3::Response { enabled })
}
/// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled`
///
/// Sets the enabled status of a single specified push rule for this user.
pub(crate) async fn set_pushrule_enabled_route(
body: Ar<set_pushrule_enabled::v3::Request>,
) -> Result<Ra<set_pushrule_enabled::v3::Response>> {
pub async fn set_pushrule_enabled_route(
body: Ruma<set_pushrule_enabled::v3::Request>,
) -> Result<set_pushrule_enabled::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if body.scope != RuleScope::Global {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Scopes other than 'global' are not supported.",
));
}
let event = services()
.account_data
.get_global::<PushRulesEventContent>(sender_user)?
.get(
None,
sender_user,
GlobalAccountDataEventType::PushRules.to_string().into(),
)?
.ok_or(Error::BadRequest(
ErrorKind::NotFound,
"PushRules event not found.",
))?;
let mut account_data = event.deserialize().map_err(|_| {
Error::bad_database("Invalid account data event in db.")
})?;
let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))?;
if account_data
.content
.global
.set_enabled(body.kind.clone(), &body.rule_id, body.enabled)
.is_err()
@ -278,37 +332,50 @@ pub(crate) async fn set_pushrule_enabled_route(
));
}
services().account_data.update_global(
services().account_data.update(
None,
sender_user,
&Raw::new(&account_data)
.expect("json event serialization should always succeed"),
GlobalAccountDataEventType::PushRules.to_string().into(),
&serde_json::to_value(account_data).expect("to json value always works"),
)?;
Ok(Ra(set_pushrule_enabled::v3::Response {}))
Ok(set_pushrule_enabled::v3::Response {})
}
/// # `DELETE /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}`
///
/// Deletes a single specified push rule for this user.
pub(crate) async fn delete_pushrule_route(
body: Ar<delete_pushrule::v3::Request>,
) -> Result<Ra<delete_pushrule::v3::Response>> {
pub async fn delete_pushrule_route(
body: Ruma<delete_pushrule::v3::Request>,
) -> Result<delete_pushrule::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if body.scope != RuleScope::Global {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Scopes other than 'global' are not supported.",
));
}
let event = services()
.account_data
.get_global::<PushRulesEventContent>(sender_user)?
.get(
None,
sender_user,
GlobalAccountDataEventType::PushRules.to_string().into(),
)?
.ok_or(Error::BadRequest(
ErrorKind::NotFound,
"PushRules event not found.",
))?;
let mut account_data = event.deserialize().map_err(|_| {
Error::bad_database("Invalid account data event in db.")
})?;
let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
.map_err(|_| Error::bad_database("Invalid account data event in db."))?;
if let Err(error) =
account_data.global.remove(body.kind.clone(), &body.rule_id)
if let Err(error) = account_data
.content
.global
.remove(body.kind.clone(), &body.rule_id)
{
let err = match error {
RemovePushRuleError::ServerDefault => Error::BadRequest(
@ -324,26 +391,27 @@ pub(crate) async fn delete_pushrule_route(
return Err(err);
}
services().account_data.update_global(
services().account_data.update(
None,
sender_user,
&Raw::new(&account_data)
.expect("json event serialization should always suceed"),
GlobalAccountDataEventType::PushRules.to_string().into(),
&serde_json::to_value(account_data).expect("to json value always works"),
)?;
Ok(Ra(delete_pushrule::v3::Response {}))
Ok(delete_pushrule::v3::Response {})
}
/// # `GET /_matrix/client/r0/pushers`
///
/// Gets all currently active pushers for the sender user.
pub(crate) async fn get_pushers_route(
body: Ar<get_pushers::v3::Request>,
) -> Result<Ra<get_pushers::v3::Response>> {
pub async fn get_pushers_route(
body: Ruma<get_pushers::v3::Request>,
) -> Result<get_pushers::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
Ok(Ra(get_pushers::v3::Response {
Ok(get_pushers::v3::Response {
pushers: services().pusher.get_pushers(sender_user)?,
}))
})
}
/// # `POST /_matrix/client/r0/pushers/set`
@ -351,12 +419,14 @@ pub(crate) async fn get_pushers_route(
/// Adds a pusher for the sender user.
///
/// - TODO: Handle `append`
pub(crate) async fn set_pushers_route(
body: Ar<set_pusher::v3::Request>,
) -> Result<Ra<set_pusher::v3::Response>> {
pub async fn set_pushers_route(
body: Ruma<set_pusher::v3::Request>,
) -> Result<set_pusher::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
services().pusher.set_pusher(sender_user, body.action.clone())?;
services()
.pusher
.set_pusher(sender_user, body.action.clone())?;
Ok(Ra(set_pusher::v3::Response::default()))
Ok(set_pusher::v3::Response::default())
}

View file

@ -1,40 +1,36 @@
use std::collections::BTreeMap;
use crate::{service::rooms::timeline::PduCount, services, Error, Result, Ruma};
use ruma::{
api::client::{
error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt,
api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt},
events::{
receipt::{ReceiptThread, ReceiptType},
RoomAccountDataEventType,
},
events::receipt::{ReceiptThread, ReceiptType},
serde::Raw,
MilliSecondsSinceUnixEpoch,
};
use crate::{
service::rooms::timeline::PduCount, services, Ar, Error, Ra, Result,
};
use std::collections::BTreeMap;
/// # `POST /_matrix/client/r0/rooms/{roomId}/read_markers`
///
/// Sets different types of read markers.
///
/// - Updates fully-read account data event to `fully_read`
/// - If `read_receipt` is set: Update private marker and public read receipt
/// EDU
pub(crate) async fn set_read_marker_route(
body: Ar<set_read_marker::v3::Request>,
) -> Result<Ra<set_read_marker::v3::Response>> {
/// - If `read_receipt` is set: Update private marker and public read receipt EDU
pub async fn set_read_marker_route(
body: Ruma<set_read_marker::v3::Request>,
) -> Result<set_read_marker::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if let Some(fully_read) = &body.fully_read {
let fully_read_event =
ruma::events::fully_read::FullyReadEventContent {
let fully_read_event = ruma::events::fully_read::FullyReadEvent {
content: ruma::events::fully_read::FullyReadEventContent {
event_id: fully_read.clone(),
};
services().account_data.update_room(
&body.room_id,
},
};
services().account_data.update(
Some(&body.room_id),
sender_user,
&Raw::new(&fully_read_event)
.expect("json event serialization should always suceed"),
RoomAccountDataEventType::FullyRead,
&serde_json::to_value(fully_read_event).expect("to json value always works"),
)?;
}
@ -46,9 +42,14 @@ pub(crate) async fn set_read_marker_route(
}
if let Some(event) = &body.private_read_receipt {
let count = services().rooms.timeline.get_pdu_count(event)?.ok_or(
Error::BadRequest(ErrorKind::InvalidParam, "Event does not exist."),
)?;
let count = services()
.rooms
.timeline
.get_pdu_count(event)?
.ok_or(Error::BadRequest(
ErrorKind::InvalidParam,
"Event does not exist.",
))?;
let count = match count {
PduCount::Backfilled(_) => {
return Err(Error::BadRequest(
@ -58,11 +59,11 @@ pub(crate) async fn set_read_marker_route(
}
PduCount::Normal(c) => c,
};
services().rooms.edus.read_receipt.private_read_set(
&body.room_id,
sender_user,
count,
)?;
services()
.rooms
.edus
.read_receipt
.private_read_set(&body.room_id, sender_user, count)?;
}
if let Some(event) = &body.read_receipt {
@ -85,32 +86,26 @@ pub(crate) async fn set_read_marker_route(
sender_user,
&body.room_id,
ruma::events::receipt::ReceiptEvent {
content: ruma::events::receipt::ReceiptEventContent(
receipt_content,
),
content: ruma::events::receipt::ReceiptEventContent(receipt_content),
room_id: body.room_id.clone(),
},
)?;
for server in services().rooms.state_cache.room_servers(&body.room_id) {
services().sending.trigger_edu_send(&server?)?;
}
}
Ok(Ra(set_read_marker::v3::Response {}))
Ok(set_read_marker::v3::Response {})
}
/// # `POST /_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}`
///
/// Sets private read marker and public read receipt EDU.
pub(crate) async fn create_receipt_route(
body: Ar<create_receipt::v3::Request>,
) -> Result<Ra<create_receipt::v3::Response>> {
pub async fn create_receipt_route(
body: Ruma<create_receipt::v3::Request>,
) -> Result<create_receipt::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
if matches!(
&body.receipt_type,
create_receipt::v3::ReceiptType::Read
| create_receipt::v3::ReceiptType::ReadPrivate
create_receipt::v3::ReceiptType::Read | create_receipt::v3::ReceiptType::ReadPrivate
) {
services()
.rooms
@ -120,15 +115,16 @@ pub(crate) async fn create_receipt_route(
match body.receipt_type {
create_receipt::v3::ReceiptType::FullyRead => {
let fully_read_event =
ruma::events::fully_read::FullyReadEventContent {
let fully_read_event = ruma::events::fully_read::FullyReadEvent {
content: ruma::events::fully_read::FullyReadEventContent {
event_id: body.event_id.clone(),
};
services().account_data.update_room(
&body.room_id,
},
};
services().account_data.update(
Some(&body.room_id),
sender_user,
&Raw::new(&fully_read_event)
.expect("json event serialization should always succeed"),
RoomAccountDataEventType::FullyRead,
&serde_json::to_value(fully_read_event).expect("to json value always works"),
)?;
}
create_receipt::v3::ReceiptType::Read => {
@ -144,23 +140,16 @@ pub(crate) async fn create_receipt_route(
receipts.insert(ReceiptType::Read, user_receipts);
let mut receipt_content = BTreeMap::new();
receipt_content.insert(body.event_id.clone(), receipts);
receipt_content.insert(body.event_id.to_owned(), receipts);
services().rooms.edus.read_receipt.readreceipt_update(
sender_user,
&body.room_id,
ruma::events::receipt::ReceiptEvent {
content: ruma::events::receipt::ReceiptEventContent(
receipt_content,
),
content: ruma::events::receipt::ReceiptEventContent(receipt_content),
room_id: body.room_id.clone(),
},
)?;
for server in
services().rooms.state_cache.room_servers(&body.room_id)
{
services().sending.trigger_edu_send(&server?)?;
}
}
create_receipt::v3::ReceiptType::ReadPrivate => {
let count = services()
@ -189,5 +178,5 @@ pub(crate) async fn create_receipt_route(
_ => return Err(Error::bad_database("Unsupported receipt type")),
}
Ok(Ra(create_receipt::v3::Response {}))
Ok(create_receipt::v3::Response {})
}
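Both receipt paths above build the same nested EDU payload: event ID → receipt type → user ID → receipt data. A toy version of that wire shape for an `m.receipt` content block using `serde_json` (field names follow the m.receipt schema; the concrete IDs and timestamp are illustrative):

use serde_json::json;

fn main() {
    // { "$event": { "m.read": { "@user": { "ts": ..., "thread_id": ... } } } }
    let receipt_content = json!({
        "$event:example.com": {
            "m.read": {
                "@alice:example.com": {
                    "ts": 1_700_000_000_000u64,
                    "thread_id": "main",
                }
            }
        }
    });
    assert!(receipt_content["$event:example.com"]["m.read"]
        ["@alice:example.com"]["ts"]
        .is_u64());
}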

Some files were not shown because too many files have changed in this diff