diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 00000000..4c64d1c8 --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,2 @@ +[env] +RUMA_UNSTABLE_EXHAUSTIVE_TYPES = "1" diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index c78ddbac..00000000 --- a/.dockerignore +++ /dev/null @@ -1,28 +0,0 @@ -# Local build and dev artifacts -target -tests - -# Docker files -Dockerfile* -docker-compose* - -# IDE files -.vscode -.idea -*.iml - -# Git folder -.git -.gitea -.gitlab -.github - -# Dot files -.env -.gitignore - -# Toml files -rustfmt.toml - -# Documentation -#*.md diff --git a/.editorconfig b/.editorconfig deleted file mode 100644 index a4e9e432..00000000 --- a/.editorconfig +++ /dev/null @@ -1,15 +0,0 @@ -# EditorConfig is awesome: https://EditorConfig.org - -root = true - -[*] -charset = utf-8 -end_of_line = lf -tab_width = 4 -indent_size = 4 -indent_style = space -insert_final_newline = true -max_line_length = 120 - -[*.nix] -indent_size = 2 diff --git a/.envrc b/.envrc index 403a9bdf..ede83d9a 100644 --- a/.envrc +++ b/.envrc @@ -1,5 +1,17 @@ #!/usr/bin/env bash -use flake +dotenv_if_exists + +system="$(nix eval --impure --raw --expr 'builtins.currentSystem')" +devshell="${DIRENV_DEVSHELL:-default}" + +if command -v nom &> /dev/null && [ -t 0 ]; then + # if nom is available, build the devshell dependencies with it to get nicer + # progress monitoring. Don't do this when stdout is piped, because it shows + # up weird in engage. 
+ nom build ".#devShells.$system.$devshell" +fi + +use flake ".#$devshell" PATH_add bin diff --git a/.gitea/PULL_REQUEST_TEMPLATE.md b/.gitea/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 0e4e01b5..00000000 --- a/.gitea/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1 +0,0 @@ -- [ ] I agree to release my code and all other changes of this PR under the Apache-2.0 license diff --git a/.github/ISSUE_TEMPLATE/Issue.md b/.github/ISSUE_TEMPLATE/Issue.md deleted file mode 100644 index 78896651..00000000 --- a/.github/ISSUE_TEMPLATE/Issue.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -name: "Issue with / Feature Request for Conduit" -about: "Please file issues on GitLab: https://gitlab.com/famedly/conduit/-/issues/new" -title: "CLOSE ME" ---- - - - -**⚠️ Conduit development does not happen on GitHub. Issues opened here will not be addressed** - -Please open issues on GitLab: https://gitlab.com/famedly/conduit/-/issues/new diff --git a/.gitignore b/.gitignore index 73ce2e1f..24b746e7 100644 --- a/.gitignore +++ b/.gitignore @@ -1,76 +1,19 @@ -# CMake -cmake-build-*/ +# Local environment overrides +/.env -# IntelliJ -.idea/ -out/ -*.iml -modules.xml -*.ipr - -# mpeltonen/sbt-idea plugin -.idea_modules/ - -# Linux backup files -*~ - -# temporary files which can be created if a process still has a handle open of a deleted file -.fuse_hidden* - -# KDE directory preferences -.directory - -# Linux trash folder which might appear on any partition or disk -.Trash-* - -# .nfs files are created when an open file is removed but is still being accessed -.nfs* - -# Rust -/target/ - -### vscode ### -.vscode/* -!.vscode/tasks.json -!.vscode/launch.json -!.vscode/extensions.json -*.code-workspace - -### Windows ### -# Windows thumbnail cache files -Thumbs.db -Thumbs.db:encryptable -ehthumbs.db -ehthumbs_vista.db - -# Dump file -*.stackdump - -# Folder config file -[Dd]esktop.ini - -# Recycle Bin used on file shares -$RECYCLE.BIN/ - -# Windows shortcuts -*.lnk - -# Conduit -conduit.toml 
-conduit.db - -# Etc. -**/*.rs.bk -cached_target - -# Nix artifacts -/result* +# Cargo artifacts +target # Direnv cache /.direnv -# Gitlab CI cache -/.gitlab-ci.d +# Nix artifacts +result* -# mdbook output -public/ \ No newline at end of file +# GitLab CI artifacts +/.gitlab-ci.d +/grapevine-static-aarch64-unknown-linux-musl +/grapevine-static-x86_64-unknown-linux-musl + +# mdbook artifacts +/public diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 8c880b9c..911feeab 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,184 +1,82 @@ stages: - ci - artifacts - - publish + - deploy variables: # Makes some things print in color TERM: ansi -# Avoid duplicate pipelines -# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines -workflow: - rules: - - if: $CI_PIPELINE_SOURCE == "merge_request_event" - - if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS - when: never - - if: $CI - before_script: - # Enable nix-command and flakes - - if command -v nix > /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi + - | + # Enable nix-command and flakes + echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf - # Add our own binary cache - - if command -v nix > /dev/null; then echo "extra-substituters = https://nix.computer.surgery/conduit" >> /etc/nix/nix.conf; fi - - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduit:ZGAf6P6LhNvnoJJ3Me3PRg7tlLSrPxcQ2RiE5LIppjo=" >> /etc/nix/nix.conf; fi + # Disable IFD, to ensure we are able to build without it + echo "allow-import-from-derivation = false" >> /etc/nix/nix.conf - # Add alternate binary cache - - if command -v nix > /dev/null && [ -n "$ATTIC_ENDPOINT" ]; then echo "extra-substituters = $ATTIC_ENDPOINT" >> /etc/nix/nix.conf; fi - - if command -v nix > /dev/null && [ -n "$ATTIC_PUBLIC_KEY" ]; then echo "extra-trusted-public-keys = $ATTIC_PUBLIC_KEY" >> /etc/nix/nix.conf; fi + # Add crane binary 
cache + echo "extra-substituters = https://crane.cachix.org" >> /etc/nix/nix.conf + echo "extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=" >> /etc/nix/nix.conf - # Add crane binary cache - - if command -v nix > /dev/null; then echo "extra-substituters = https://crane.cachix.org" >> /etc/nix/nix.conf; fi - - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=" >> /etc/nix/nix.conf; fi + # Add nix-community binary cache + echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf + echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf - # Add nix-community binary cache - - if command -v nix > /dev/null; then echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf; fi - - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf; fi + # Add our own binary cache + if [ -n "$ATTIC_ENDPOINT" ] && [ -n "$ATTIC_CACHE" ] && [ -n "$ATTIC_PUBLIC_KEY" ]; then + echo "extra-substituters = $ATTIC_ENDPOINT/$ATTIC_CACHE" >> /etc/nix/nix.conf + echo "extra-trusted-public-keys = $ATTIC_PUBLIC_KEY" >> /etc/nix/nix.conf + fi - # Install direnv and nix-direnv - - if command -v nix > /dev/null; then nix-env -iA nixpkgs.direnv nixpkgs.nix-direnv; fi + # Install direnv + nix profile install --impure --inputs-from . 
nixpkgs#direnv - # Allow .envrc - - if command -v nix > /dev/null; then direnv allow; fi + # Allow .envrc + direnv allow - # Set CARGO_HOME to a cacheable path - - export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo" + # Set CARGO_HOME to a cacheable path + export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo" + +cache-ci-deps: + stage: ci + image: git.lix.systems/lix-project/lix:2.93.3 + rules: + - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH + script: direnv exec . job cache-ci-deps ci: stage: ci - image: nixos/nix:2.20.4 + image: git.lix.systems/lix-project/lix:2.93.3 + rules: + - if: $CI_PIPELINE_SOURCE == 'merge_request_event' script: - # Cache the inputs required for the devShell - - ./bin/nix-build-and-cache .#devShells.x86_64-linux.default.inputDerivation - - - direnv exec . engage + - direnv exec . job ci cache: - key: nix paths: - target - .gitlab-ci.d - rules: - # CI on upstream runners (only available for maintainers) - - if: $CI_PIPELINE_SOURCE == "merge_request_event" && $IS_UPSTREAM_CI == "true" - # Manual CI on unprotected branches that are not MRs - - if: $CI_PIPELINE_SOURCE != "merge_request_event" && $CI_COMMIT_REF_PROTECTED == "false" - when: manual - # Manual CI on forks - - if: $IS_UPSTREAM_CI != "true" - when: manual - - if: $CI - interruptible: true artifacts: stage: artifacts - image: nixos/nix:2.20.4 + image: git.lix.systems/lix-project/lix:2.93.3 + rules: + - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH + - if: $CI_PIPELINE_SOURCE == 'merge_request_event' script: - - ./bin/nix-build-and-cache .#static-x86_64-unknown-linux-musl - - cp result/bin/conduit x86_64-unknown-linux-musl - - - mkdir -p target/release - - cp result/bin/conduit target/release - - direnv exec . cargo deb --no-build - - mv target/debian/*.deb x86_64-unknown-linux-musl.deb - - # Since the OCI image package is based on the binary package, this has the - # fun side effect of uploading the normal binary too. 
Conduit users who are - # deploying with Nix can leverage this fact by adding our binary cache to - # their systems. - # - # Note that although we have an `oci-image-x86_64-unknown-linux-musl` - # output, we don't build it because it would be largely redundant to this - # one since it's all containerized anyway. - - ./bin/nix-build-and-cache .#oci-image - - cp result oci-image-amd64.tar.gz - - - ./bin/nix-build-and-cache .#static-aarch64-unknown-linux-musl - - cp result/bin/conduit aarch64-unknown-linux-musl - - - ./bin/nix-build-and-cache .#oci-image-aarch64-unknown-linux-musl - - cp result oci-image-arm64v8.tar.gz - - - ./bin/nix-build-and-cache .#book - # We can't just copy the symlink, we need to dereference it https://gitlab.com/gitlab-org/gitlab/-/issues/19746 - - cp -r --dereference result public + - direnv exec . job artifacts artifacts: paths: - - x86_64-unknown-linux-musl - - aarch64-unknown-linux-musl - - x86_64-unknown-linux-musl.deb - - oci-image-amd64.tar.gz - - oci-image-arm64v8.tar.gz - - public - rules: - # CI required for all MRs - - if: $CI_PIPELINE_SOURCE == "merge_request_event" - # Optional CI on forks - - if: $IS_UPSTREAM_CI != "true" - when: manual - allow_failure: true - - if: $CI - interruptible: true - -.push-oci-image: - stage: publish - image: docker:25.0.0 - services: - - docker:25.0.0-dind - variables: - IMAGE_SUFFIX_AMD64: amd64 - IMAGE_SUFFIX_ARM64V8: arm64v8 - script: - - docker load -i oci-image-amd64.tar.gz - - IMAGE_ID_AMD64=$(docker images -q conduit:next) - - docker load -i oci-image-arm64v8.tar.gz - - IMAGE_ID_ARM64V8=$(docker images -q conduit:next) - # Tag and push the architecture specific images - - docker tag $IMAGE_ID_AMD64 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 - - docker tag $IMAGE_ID_ARM64V8 $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8 - - docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 - - docker push $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8 - # Tag the multi-arch image - - docker 
manifest create $IMAGE_NAME:$CI_COMMIT_SHA --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8 - - docker manifest push $IMAGE_NAME:$CI_COMMIT_SHA - # Tag and push the git ref - - docker manifest create $IMAGE_NAME:$CI_COMMIT_REF_NAME --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8 - - docker manifest push $IMAGE_NAME:$CI_COMMIT_REF_NAME - # Tag git tags as 'latest' - - | - if [[ -n "$CI_COMMIT_TAG" ]]; then - docker manifest create $IMAGE_NAME:latest --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_AMD64 --amend $IMAGE_NAME:$CI_COMMIT_SHA-$IMAGE_SUFFIX_ARM64V8 - docker manifest push $IMAGE_NAME:latest - fi - dependencies: - - artifacts - only: - - next - - master - - tags - -oci-image:push-gitlab: - extends: .push-oci-image - variables: - IMAGE_NAME: $CI_REGISTRY_IMAGE/matrix-conduit - before_script: - - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY - -oci-image:push-dockerhub: - extends: .push-oci-image - variables: - IMAGE_NAME: matrixconduit/matrix-conduit - before_script: - - docker login -u $DOCKER_HUB_USER -p $DOCKER_HUB_PASSWORD + - grapevine-static-aarch64-unknown-linux-musl + - grapevine-static-x86_64-unknown-linux-musl pages: - stage: publish - dependencies: - - artifacts - only: - - next + stage: deploy + image: git.lix.systems/lix-project/lix:2.93.3 + rules: + - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH script: - - "true" + - direnv exec . 
job pages artifacts: paths: - public diff --git a/.gitlab/CODEOWNERS b/.gitlab/CODEOWNERS deleted file mode 100644 index 665aaaa7..00000000 --- a/.gitlab/CODEOWNERS +++ /dev/null @@ -1,5 +0,0 @@ -# Nix things -.envrc @CobaltCause -flake.lock @CobaltCause -flake.nix @CobaltCause -nix/ @CobaltCause diff --git a/.gitlab/issue_templates/Bug Report.md b/.gitlab/issue_templates/Bug Report.md deleted file mode 100644 index 3e66d43c..00000000 --- a/.gitlab/issue_templates/Bug Report.md +++ /dev/null @@ -1,19 +0,0 @@ - - -### Description - - -### System Configuration - - -Conduit Version: -Database backend (default is sqlite): sqlite - - -/label ~conduit diff --git a/.gitlab/issue_templates/Feature Request.md b/.gitlab/issue_templates/Feature Request.md deleted file mode 100644 index 3f636e71..00000000 --- a/.gitlab/issue_templates/Feature Request.md +++ /dev/null @@ -1,17 +0,0 @@ - - - -### Is your feature request related to a problem? Please describe. - - - - -### Describe the solution you'd like - - - - -/label ~conduit diff --git a/.gitlab/merge_request_templates/MR.md b/.gitlab/merge_request_templates/MR.md deleted file mode 100644 index c592a3b2..00000000 --- a/.gitlab/merge_request_templates/MR.md +++ /dev/null @@ -1,8 +0,0 @@ - - - ------------------------------------------------------------------------------ - -- [ ] I ran `cargo fmt` and `cargo test` -- [ ] I agree to release my code and all other changes of this MR under the Apache-2.0 license - diff --git a/.gitlab/route-map.yml b/.gitlab/route-map.yml deleted file mode 100644 index 2c23079c..00000000 --- a/.gitlab/route-map.yml +++ /dev/null @@ -1,3 +0,0 @@ -# Docs: Map markdown to html files -- source: /docs/(.+)\.md/ - public: '\1.html' \ No newline at end of file diff --git a/.gitlab/setup-buildx-remote-builders.sh b/.gitlab/setup-buildx-remote-builders.sh deleted file mode 100644 index 29d50dde..00000000 --- a/.gitlab/setup-buildx-remote-builders.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/sh -set -eux - -# 
--------------------------------------------------------------------- # -# # -# Configures docker buildx to use a remote server for arm building. # -# Expects $SSH_PRIVATE_KEY to be a valid ssh ed25519 private key with # -# access to the server $ARM_SERVER_USER@$ARM_SERVER_IP # -# # -# This is expected to only be used in the official CI/CD pipeline! # -# # -# Requirements: openssh-client, docker buildx # -# Inspired by: https://depot.dev/blog/building-arm-containers # -# # -# --------------------------------------------------------------------- # - -cat "$BUILD_SERVER_SSH_PRIVATE_KEY" | ssh-add - - -# Test server connections: -ssh "$ARM_SERVER_USER@$ARM_SERVER_IP" "uname -a" -ssh "$AMD_SERVER_USER@$AMD_SERVER_IP" "uname -a" - -# Connect remote arm64 server for all arm builds: -docker buildx create \ - --name "multi" \ - --driver "docker-container" \ - --platform "linux/arm64,linux/arm/v7" \ - "ssh://$ARM_SERVER_USER@$ARM_SERVER_IP" - -# Connect remote amd64 server for adm64 builds: -docker buildx create --append \ - --name "multi" \ - --driver "docker-container" \ - --platform "linux/amd64" \ - "ssh://$AMD_SERVER_USER@$AMD_SERVER_IP" - -docker buildx use multi diff --git a/.lycheeignore b/.lycheeignore new file mode 120000 index 00000000..3e4e48b0 --- /dev/null +++ b/.lycheeignore @@ -0,0 +1 @@ +.gitignore \ No newline at end of file diff --git a/.mailmap b/.mailmap new file mode 100644 index 00000000..a4ac7d66 --- /dev/null +++ b/.mailmap @@ -0,0 +1 @@ +Olivia Lee diff --git a/.markdownlintignore b/.markdownlintignore new file mode 120000 index 00000000..3e4e48b0 --- /dev/null +++ b/.markdownlintignore @@ -0,0 +1 @@ +.gitignore \ No newline at end of file diff --git a/.vscode/extensions.json b/.vscode/extensions.json deleted file mode 100644 index 037f20d7..00000000 --- a/.vscode/extensions.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "recommendations": [ - "rust-lang.rust-analyzer", - "bungcip.better-toml", - "ms-azuretools.vscode-docker", - "eamodio.gitlens", - 
"serayuzgur.crates", - "vadimcn.vscode-lldb", - "timonwong.shellcheck" - ] -} \ No newline at end of file diff --git a/.vscode/launch.json b/.vscode/launch.json deleted file mode 100644 index da521604..00000000 --- a/.vscode/launch.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - // Use IntelliSense to learn about possible attributes. - // Hover to view descriptions of existing attributes. - // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 - "version": "0.2.0", - "configurations": [ - { - "type": "lldb", - "request": "launch", - "name": "Debug conduit", - "sourceLanguages": ["rust"], - "cargo": { - "args": [ - "build", - "--bin=conduit", - "--package=conduit" - ], - "filter": { - "name": "conduit", - "kind": "bin" - } - }, - "args": [], - "env": { - "RUST_BACKTRACE": "1", - "CONDUIT_CONFIG": "", - "CONDUIT_SERVER_NAME": "localhost", - "CONDUIT_DATABASE_PATH": "/tmp", - "CONDUIT_ADDRESS": "0.0.0.0", - "CONDUIT_PORT": "6167" - }, - "cwd": "${workspaceFolder}" - } - ] -} \ No newline at end of file diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md deleted file mode 100644 index 1b060350..00000000 --- a/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,134 +0,0 @@ - -# Contributor Covenant Code of Conduct - -## Our Pledge - -We as members, contributors, and leaders pledge to make participation in our -community a harassment-free experience for everyone, regardless of age, body -size, visible or invisible disability, ethnicity, sex characteristics, gender -identity and expression, level of experience, education, socio-economic status, -nationality, personal appearance, race, caste, color, religion, or sexual -identity and orientation. - -We pledge to act and interact in ways that contribute to an open, welcoming, -diverse, inclusive, and healthy community. 
- -## Our Standards - -Examples of behavior that contributes to a positive environment for our -community include: - -* Demonstrating empathy and kindness toward other people -* Being respectful of differing opinions, viewpoints, and experiences -* Giving and gracefully accepting constructive feedback -* Accepting responsibility and apologizing to those affected by our mistakes, - and learning from the experience -* Focusing on what is best not just for us as individuals, but for the overall - community - -Examples of unacceptable behavior include: - -* The use of sexualized language or imagery, and sexual attention or advances of - any kind -* Trolling, insulting or derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or email address, - without their explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Enforcement Responsibilities - -Community leaders are responsible for clarifying and enforcing our standards of -acceptable behavior and will take appropriate and fair corrective action in -response to any behavior that they deem inappropriate, threatening, offensive, -or harmful. - -Community leaders have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions that are -not aligned to this Code of Conduct, and will communicate reasons for moderation -decisions when appropriate. - -## Scope - -This Code of Conduct applies within all community spaces, and also applies when -an individual is officially representing the community in public spaces. -Examples of representing our community include using an official e-mail address, -posting via an official social media account, or acting as an appointed -representative at an online or offline event. 
- -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported to the community leaders responsible for enforcement over email at -coc@koesters.xyz or over Matrix at @timo:conduit.rs. -All complaints will be reviewed and investigated promptly and fairly. - -All community leaders are obligated to respect the privacy and security of the -reporter of any incident. - -## Enforcement Guidelines - -Community leaders will follow these Community Impact Guidelines in determining -the consequences for any action they deem in violation of this Code of Conduct: - -### 1. Correction - -**Community Impact**: Use of inappropriate language or other behavior deemed -unprofessional or unwelcome in the community. - -**Consequence**: A private, written warning from community leaders, providing -clarity around the nature of the violation and an explanation of why the -behavior was inappropriate. A public apology may be requested. - -### 2. Warning - -**Community Impact**: A violation through a single incident or series of -actions. - -**Consequence**: A warning with consequences for continued behavior. No -interaction with the people involved, including unsolicited interaction with -those enforcing the Code of Conduct, for a specified period of time. This -includes avoiding interactions in community spaces as well as external channels -like social media. Violating these terms may lead to a temporary or permanent -ban. - -### 3. Temporary Ban - -**Community Impact**: A serious violation of community standards, including -sustained inappropriate behavior. - -**Consequence**: A temporary ban from any sort of interaction or public -communication with the community for a specified period of time. No public or -private interaction with the people involved, including unsolicited interaction -with those enforcing the Code of Conduct, is allowed during this period. -Violating these terms may lead to a permanent ban. - -### 4. 
Permanent Ban - -**Community Impact**: Demonstrating a pattern of violation of community -standards, including sustained inappropriate behavior, harassment of an -individual, or aggression toward or disparagement of classes of individuals. - -**Consequence**: A permanent ban from any sort of public interaction within the -community. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 2.1, available at -[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. - -Community Impact Guidelines were inspired by -[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. - -For answers to common questions about this code of conduct, see the FAQ at -[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at -[https://www.contributor-covenant.org/translations][translations]. - -[homepage]: https://www.contributor-covenant.org -[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html -[Mozilla CoC]: https://github.com/mozilla/diversity -[FAQ]: https://www.contributor-covenant.org/faq -[translations]: https://www.contributor-covenant.org/translations - diff --git a/Cargo.lock b/Cargo.lock index 0063a3e0..6c7c97d8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,78 +1,82 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 3 +version = 4 [[package]] name = "addr2line" -version = "0.21.0" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] [[package]] -name = "adler" -version = "1.0.2" +name = "adler2" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - -[[package]] -name = "ahash" -version = "0.8.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" -dependencies = [ - "cfg-if", - "once_cell", - "version_check", - "zerocopy", -] +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" [[package]] name = "aho-corasick" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] [[package]] -name = "allocator-api2" -version = "0.2.16" +name = "anstyle" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" [[package]] -name = "anstyle" -version = "1.0.6" +name = "anyhow" +version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" +checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" [[package]] name = "arc-swap" -version = "1.7.0" +version = "1.7.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b3d0060af21e8d11a926981cc00c6c1541aa91dd64b9f881985c3da1094425f" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" [[package]] -name = "arrayref" -version = "0.3.7" +name = "argon2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" - -[[package]] -name = "arrayvec" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +checksum = "3c3610892ee6e0cbce8ae2700349fcf8f98adb0dbfbee85aec3c9179d29cc072" +dependencies = [ + "base64ct", + "blake2", + "cpufeatures", + "password-hash", +] [[package]] name = "as_variant" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f38fa22307249f86fb7fad906fcae77f2564caeb56d7209103c551cd1cf4798f" +checksum = "9dbc3a507a82b17ba0d98f6ce8fd6954ea0c8152e98009d36a40d8dcc8ce078a" + +[[package]] +name = "assert_cmd" +version = "2.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bd389a4b2970a01282ee455294913c0a43724daedcd1a24c3eb0ec1c1320b66" +dependencies = [ + "anstyle", + "bstr", + "doc-comment", + "libc", + "predicates", + "predicates-core", + "predicates-tree", + "wait-timeout", +] [[package]] name = "assign" @@ -81,46 +85,65 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" [[package]] -name = "async-trait" -version = "0.1.77" +name = "async-stream" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" +dependencies = [ + "async-stream-impl", + 
"futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn", ] [[package]] -name = "atomic" -version = "0.6.0" +name = "async-trait" +version = "0.1.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d818003e740b63afc82337e3160717f4f63078720a810b7b903e70a5d1d2994" +checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ - "bytemuck", + "proc-macro2", + "quote", + "syn", ] +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + [[package]] name = "autocfg" -version = "1.1.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "axum" -version = "0.6.20" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" dependencies = [ "async-trait", "axum-core", - "bitflags 1.3.2", "bytes", "futures-util", - "headers", "http", "http-body", + "http-body-util", "hyper", + "hyper-util", "itoa", "matchit", "memchr", @@ -133,43 +156,74 @@ dependencies = [ "serde_path_to_error", "serde_urlencoded", "sync_wrapper", - "tower", + "tokio", + "tower 0.5.2", "tower-layer", "tower-service", + "tracing", ] [[package]] name = "axum-core" -version = "0.3.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" dependencies = [ "async-trait", "bytes", "futures-util", "http", "http-body", + "http-body-util", "mime", + "pin-project-lite", "rustversion", + "sync_wrapper", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "axum-extra" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c794b30c904f0a1c2fb7740f7df7f7972dfaa14ef6f57cb6178dc63e5dca2f04" +dependencies = [ + "axum", + "axum-core", + "bytes", + "fastrand", + "futures-util", + "headers", + "http", + "http-body", + "http-body-util", + "mime", + "multer", + "pin-project-lite", + "serde", + "tower 0.5.2", "tower-layer", "tower-service", ] [[package]] name = "axum-server" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "447f28c85900215cc1bea282f32d4a2f22d55c5a300afdfbc661c8d6a632e063" +version = "0.7.2+grapevine-1" +source = "git+https://gitlab.computer.surgery/matrix/thirdparty/axum-server.git?rev=v0.7.2%2Bgrapevine-1#1f9b20296494792a1f09ab14689f3b2954b4f782" dependencies = [ "arc-swap", "bytes", - "futures-util", + "fs-err", "http", "http-body", "hyper", + "hyper-util", "pin-project-lite", "rustls", "rustls-pemfile", + "rustls-pki-types", "tokio", "tokio-rustls", "tower-service", @@ -177,49 +231,65 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.69" +version = "0.3.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" dependencies = [ "addr2line", - "cc", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", + "windows-targets 0.52.6", ] [[package]] name = "base64" -version = "0.21.7" +version = "0.22.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.6.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" [[package]] name = "bindgen" -version = "0.69.4" +version = "0.71.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" +checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.9.1", "cexpr", "clang-sys", - "itertools 0.12.1", - "lazy_static", - "lazycell", + "itertools 0.13.0", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", - "syn 2.0.52", + "syn", +] + +[[package]] +name = "bindgen" +version = "0.72.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f72209734318d0b619a5e0f5129918b848c416e122a3c4ce054e03cb87b726f" +dependencies = [ + "bitflags 2.9.1", + "cexpr", + "clang-sys", + "itertools 0.13.0", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn", ] [[package]] @@ -230,19 +300,17 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.2" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" [[package]] -name = "blake2b_simd" -version = "1.0.2" +name = "blake2" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"23285ad32269793932e830392f2fe2f83e26488fd3ec778883a93c8323735780" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" dependencies = [ - "arrayref", - "arrayvec", - "constant_time_eq", + "digest", ] [[package]] @@ -255,48 +323,59 @@ dependencies = [ ] [[package]] -name = "bumpalo" -version = "3.15.4" +name = "bstr" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ff69b9dd49fd426c69a0db9fc04dd934cdb6645ff000864d98f7e2af8830eaa" +checksum = "234113d19d0d7d613b40e86fb654acf958910802bcceab913a4f9e7cda03b1a4" +dependencies = [ + "memchr", + "regex-automata 0.4.9", + "serde", +] + +[[package]] +name = "bumpalo" +version = "3.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" [[package]] name = "bytemuck" -version = "1.15.0" +version = "1.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d6d68c57235a3a081186990eca2867354726650f42f7516ca50c28d6281fd15" +checksum = "5c76a5792e44e4abe34d3abf15636779261d45a7450612059293d1d2cfc63422" [[package]] -name = "byteorder" -version = "1.5.0" +name = "byteorder-lite" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" +checksum = "8f1fe948ff07f4bd06c30984e69f5b4899c516a3ef74f34df92a2df2ab535495" [[package]] name = "bytes" -version = "1.5.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "bzip2-sys" -version = "0.1.11+1.0.8" +version = "0.1.13+1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" +checksum = 
"225bff33b2141874fe80d71e07d6eec4f85c5c216453dd96388240f96e1acc14" dependencies = [ "cc", - "libc", "pkg-config", ] [[package]] name = "cc" -version = "1.0.90" +version = "1.2.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cd6604a82acf3039f1144f54b8eb34e91ffba622051189e71b781822d5ee1f5" +checksum = "c3a42d84bb6b69d3a8b3eaacf0d88f179e1929695e1ad012b6cf64d9caaa5fd2" dependencies = [ "jobserver", "libc", + "shlex", ] [[package]] @@ -310,21 +389,21 @@ dependencies = [ [[package]] name = "cfg-if" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" [[package]] name = "cfg_aliases" -version = "0.1.1" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "clang-sys" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" dependencies = [ "glob", "libc", @@ -333,9 +412,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.2" +version = "4.5.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b230ab84b0ffdf890d5a10abdbc8b83ae1c4918275daea1ab8801f71536b2651" +checksum = "ed87a9d530bb41a67537289bafcac159cb3ee28460e0a4571123d2a778a6a882" dependencies = [ "clap_builder", "clap_derive", @@ -343,31 +422,32 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.2" +version = "4.5.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" +checksum = "64f4f3f3c77c94aff3c7e9aac9a2ca1974a5adf392a8bb751e827d6d127ab966" dependencies = [ "anstyle", "clap_lex", + "terminal_size", ] [[package]] name = "clap_derive" -version = "4.5.0" +version = "4.5.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "307bc0538d5f0f83b8248db3087aa92fe504e4691294d0c96c0eabc33f47ba47" +checksum = "ef4f52386a59ca4c860f7393bcf8abd8dfd91ecccc0f774635ff68e92eeef491" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.52", + "syn", ] [[package]] name = "clap_lex" -version = "0.7.0" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" +checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" [[package]] name = "color_quant" @@ -376,58 +456,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" [[package]] -name = "conduit" -version = "0.7.0" +name = "console" +version = "0.15.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "054ccb5b10f9f2cbf51eb355ca1d05c2d279ce1804688d0db74b4733a5aeafd8" dependencies = [ - "async-trait", - "axum", - "axum-server", - "base64", - "bytes", - "clap", - "directories", - "figment", - "futures-util", - "hmac", - "http", - "hyper", - "image", - "jsonwebtoken", - "lazy_static", - "lru-cache", - "nix", - "num_cpus", - "opentelemetry", - "opentelemetry-jaeger", - "parking_lot", - "persy", - "rand", - "regex", - "reqwest", - "ring", - "ruma", - "rusqlite", - "rust-argon2", - "rust-rocksdb", - "sd-notify", - "serde", - "serde_html_form", - "serde_json", - "serde_yaml", - "sha-1", - "thiserror", - "thread_local", - "threadpool", - "tikv-jemalloc-ctl", - "tikv-jemallocator", - "tokio", - "tower", - "tower-http", - "tracing", - "tracing-flame", - 
"tracing-opentelemetry", - "tracing-subscriber", - "trust-dns-resolver", + "encode_unicode", + "libc", + "once_cell", + "windows-sys 0.59.0", ] [[package]] @@ -438,21 +475,15 @@ checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "const_panic" -version = "0.2.8" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6051f239ecec86fde3410901ab7860d458d160371533842974fc61f96d15879b" - -[[package]] -name = "constant_time_eq" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2" +checksum = "b98d1483e98c9d67f341ab4b3915cfdc54740bd6f5cccc9226ee0535d86aa8fb" [[package]] name = "core-foundation" -version = "0.9.4" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" dependencies = [ "core-foundation-sys", "libc", @@ -460,58 +491,28 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.12" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" dependencies = [ "libc", ] -[[package]] -name = "crc" -version = "3.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" -dependencies = [ - "crc-catalog", -] - 
-[[package]] -name = "crc-catalog" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" - [[package]] name = "crc32fast" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" dependencies = [ "cfg-if", ] -[[package]] -name = "crossbeam-channel" -version = "0.5.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95" -dependencies = [ - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" - [[package]] name = "crypto-common" version = "0.1.6" @@ -524,16 +525,15 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.1.2" +version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ "cfg-if", "cpufeatures", "curve25519-dalek-derive", "digest", "fiat-crypto", - "platforms", "rustc_version", "subtle", "zeroize", @@ -547,33 +547,26 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", -] - -[[package]] -name = "dashmap" -version = "5.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" -dependencies = [ - "cfg-if", - "hashbrown 0.14.3", - "lock_api", - "once_cell", - "parking_lot_core", + "syn", ] [[package]] 
name = "data-encoding" -version = "2.5.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" +checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" + +[[package]] +name = "date_header" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c03c416ed1a30fbb027ef484ba6ab6f80e1eada675e1a2b92fd673c045a1f1d" [[package]] name = "der" -version = "0.7.8" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" +checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" dependencies = [ "const-oid", "zeroize", @@ -581,13 +574,19 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.11" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" dependencies = [ "powerfmt", ] +[[package]] +name = "difflib" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" + [[package]] name = "digest" version = "0.10.7" @@ -600,24 +599,21 @@ dependencies = [ ] [[package]] -name = "directories" -version = "4.0.1" +name = "displaydoc" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f51c5d4ddabd36886dd3e1438cb358cdcb0d7c499cb99cb4ac2e38e18b5cb210" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ - "dirs-sys", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "dirs-sys" -version = "0.3.7" +name = "doc-comment" +version = "0.3.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" -dependencies = [ - "libc", - "redox_users", - "winapi", -] +checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" [[package]] name = "ed25519" @@ -631,13 +627,13 @@ dependencies = [ [[package]] name = "ed25519-dalek" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" +checksum = "70e796c081cee67dc755e1a36a0a172b897fab85fc3f6bc48307991f64e4eca9" dependencies = [ "curve25519-dalek", "ed25519", - "rand_core", + "rand_core 0.6.4", "serde", "sha2", "subtle", @@ -646,42 +642,58 @@ dependencies = [ [[package]] name = "either" -version = "1.10.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "encode_unicode" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" [[package]] name = "encoding_rs" -version = "0.8.33" +version = "0.8.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" dependencies = [ "cfg-if", ] [[package]] name = "enum-as-inner" -version = "0.5.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" +checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc" dependencies = [ "heck", "proc-macro2", "quote", - "syn 1.0.109", + "syn", ] [[package]] name = 
"equivalent" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] [[package]] name = "fallible-iterator" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" +checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" [[package]] name = "fallible-streaming-iterator" @@ -690,50 +702,57 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] -name = "fdeflate" -version = "0.3.4" +name = "fastrand" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f9bfee30e4dedf0ab8b422f03af778d9612b63f502710fc500a334ebe2de645" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "fdeflate" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e6853b52649d4ac5c0bd02320cddc5ba956bdb407c4b75a2c6b75bf51500f8c" dependencies = [ "simd-adler32", ] [[package]] name = "fiat-crypto" -version = "0.2.6" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1676f435fc1dadde4d03e43f5d62b259e1ce5f40bd4ffb21db2b42ebe59c1382" - -[[package]] -name = "figment" -version = "0.10.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b6e5bc7bd59d60d0d45a6ccab6cf0f4ce28698fb4e81e750ddf229c9b824026" 
-dependencies = [ - "atomic", - "pear", - "serde", - "toml", - "uncased", - "version_check", -] +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "flate2" -version = "1.0.28" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" +checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d" dependencies = [ "crc32fast", "miniz_oxide", ] +[[package]] +name = "float-cmp" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b09cf3155332e944990140d967ff5eceb70df778b34f77d8075db46e4704e6d8" +dependencies = [ + "num-traits", +] + [[package]] name = "fnv" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + [[package]] name = "form_urlencoded" version = "1.2.1" @@ -744,51 +763,35 @@ dependencies = [ ] [[package]] -name = "fs2" -version = "0.4.3" +name = "fs-err" +version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" +checksum = "88d7be93788013f265201256d58f04936a8079ad5dc898743aa20525f503b683" dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "futures" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", + "autocfg", + "tokio", ] [[package]] name = "futures-channel" -version = 
"0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", - "futures-sink", ] [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -797,46 +800,43 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn", ] [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = 
"0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ - "futures-channel", "futures-core", - "futures-io", "futures-macro", "futures-sink", "futures-task", - "memchr", "pin-project-lite", "pin-utils", "slab", @@ -854,22 +854,36 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.12" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if", "js-sys", "libc", - "wasi", + "wasi 0.11.1+wasi-snapshot-preview1", + "wasm-bindgen", +] + +[[package]] +name = "getrandom" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", "wasm-bindgen", ] [[package]] name = "gif" -version = "0.13.1" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fb2d69b19215e18bb912fa30f7ce15846e301408695e44e0ef719f1da9e19f2" +checksum = "4ae047235e33e2829703574b54fdec96bfbad892062d97fed2f76022287de61b" dependencies = [ "color_quant", "weezl", @@ -877,29 +891,95 @@ dependencies = [ [[package]] name = "gimli" -version = "0.28.1" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "glob" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" + +[[package]] +name = "grapevine" +version = "0.1.0" +dependencies = [ + "argon2", + "assert_cmd", + "async-trait", + "axum", + "axum-extra", + "axum-server", + "base64", + "bytes", + "clap", + "futures-util", + "hmac", + "html-escape", + "http", + "http-body-util", + "hyper", + "image", + "insta", + "jsonwebtoken", + "lru-cache", + "nix", + "num_cpus", + "opentelemetry", + "opentelemetry-jaeger-propagator", + "opentelemetry-otlp", + "opentelemetry-prometheus", + "opentelemetry_sdk", + "parking_lot", + "phf", + "pin-project-lite", + "predicates", + "prometheus", + "proxy-header", + "rand 0.8.5", + "regex", + "reqwest", + "ring", + "ruma", + "rusqlite", + "rust-rocksdb", + "rustls", + "sd-notify", + "serde", + "serde_html_form", + "serde_json", + "serde_yaml", + "sha-1", + "strum", + "tempfile", + "thiserror 2.0.12", + "thread_local", + "tokio", + "toml", + "tower 0.5.2", + "tower-http", + "tracing", + "tracing-flame", + "tracing-opentelemetry", + "tracing-subscriber", + "trust-dns-resolver", + "xdg", +] [[package]] name = "h2" -version = "0.3.24" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" +checksum = "17da50a276f1e01e0ba6c029e47b7100754904ee8a278f886546e98575380785" dependencies = [ + "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", - "futures-util", "http", - "indexmap 2.2.5", + "indexmap 2.10.0", "slab", "tokio", "tokio-util", @@ -914,28 +994,27 @@ checksum = 
"8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashbrown" -version = "0.14.3" +version = "0.15.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" dependencies = [ - "ahash", - "allocator-api2", + "foldhash", ] [[package]] name = "hashlink" -version = "0.8.4" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" +checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.15.4", ] [[package]] name = "headers" -version = "0.3.9" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270" +checksum = "b3314d5adb5d94bcdf56771f2e50dbbc80bb4bdf88967526706205ac9eff24eb" dependencies = [ "base64", "bytes", @@ -948,24 +1027,24 @@ dependencies = [ [[package]] name = "headers-core" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" +checksum = "54b4a22553d4242c49fddb9ba998a99962b5cc6f22cb5a3482bec22522403ce4" dependencies = [ "http", ] [[package]] name = "heck" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "hermit-abi" -version = "0.3.9" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" +checksum = 
"fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" [[package]] name = "hmac" @@ -977,21 +1056,19 @@ dependencies = [ ] [[package]] -name = "hostname" -version = "0.3.1" +name = "html-escape" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" +checksum = "6d1ad449764d627e22bfd7cd5e8868264fc9236e07c752972b4080cd351cb476" dependencies = [ - "libc", - "match_cfg", - "winapi", + "utf8-width", ] [[package]] name = "http" -version = "0.2.12" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" dependencies = [ "bytes", "fnv", @@ -999,27 +1076,42 @@ dependencies = [ ] [[package]] -name = "http-body" -version = "0.4.6" +name = "http-auth" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +checksum = "150fa4a9462ef926824cf4519c84ed652ca8f4fbae34cb8af045b5cbcaf98822" +dependencies = [ + "memchr", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", "pin-project-lite", ] -[[package]] -name = "http-range-header" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" - [[package]] name = 
"httparse" -version = "1.8.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" [[package]] name = "httpdate" @@ -1029,13 +1121,12 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "0.14.28" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" +checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" dependencies = [ "bytes", "futures-channel", - "futures-core", "futures-util", "h2", "http", @@ -1044,61 +1135,196 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2", + "smallvec", "tokio", - "tower-service", - "tracing", "want", ] [[package]] name = "hyper-rustls" -version = "0.24.2" +version = "0.27.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ - "futures-util", "http", "hyper", + "hyper-util", "rustls", + "rustls-native-certs", + "rustls-pki-types", "tokio", "tokio-rustls", + "tower-service", +] + +[[package]] +name = "hyper-timeout" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" +dependencies = [ + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d9b05277c7e8da2c93a568989bb6207bef0112e8d17df7a6eda4a3cf143bc5e" +dependencies = [ + "base64", + "bytes", + "futures-channel", + 
"futures-core", + "futures-util", + "http", + "http-body", + "hyper", + "ipnet", + "libc", + "percent-encoding", + "pin-project-lite", + "socket2 0.6.0", + "tokio", + "tower-service", + "tracing", +] + +[[package]] +name = "icu_collections" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" +dependencies = [ + "displaydoc", + "potential_utf", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" + +[[package]] +name = "icu_properties" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "potential_utf", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" + +[[package]] +name = "icu_provider" +version = "2.0.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" +dependencies = [ + "displaydoc", + "icu_locale_core", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", ] [[package]] name = "idna" -version = "0.2.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" dependencies = [ - "matches", "unicode-bidi", "unicode-normalization", ] [[package]] name = "idna" -version = "0.5.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +dependencies = [ + "icu_normalizer", + "icu_properties", ] [[package]] name = "image" -version = "0.24.9" +version = "0.25.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5690139d2f55868e080017335e4b94cb7414274c74f1669c84fb5feba2c9f69d" +checksum = "db35664ce6b9810857a38a906215e75a9c879f0696556a39f59c62829710251a" dependencies = [ "bytemuck", - "byteorder", + "byteorder-lite", "color_quant", "gif", - "jpeg-decoder", "num-traits", "png", + "zune-core", + "zune-jpeg", ] [[package]] @@ -1113,26 +1339,40 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.5" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4" +checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" dependencies = [ "equivalent", - "hashbrown 0.14.3", + "hashbrown 0.15.4", "serde", ] [[package]] -name = "inlinable_string" -version = "0.1.15" +name = "insta" +version = "1.43.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb" +checksum = "154934ea70c58054b556dd430b99a98c2a7ff5309ac9891597e339b5c28f4371" +dependencies = [ + "console", + "once_cell", + "pest", + "pest_derive", + "regex", + "serde", + "similar", +] [[package]] -name = "integer-encoding" -version = "3.0.4" +name = "io-uring" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" +checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4" +dependencies = [ + "bitflags 2.9.1", + "cfg-if", + "libc", +] [[package]] name = "ipconfig" @@ -1140,7 +1380,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2", + "socket2 0.5.10", "widestring", "windows-sys 0.48.0", "winreg", @@ -1148,55 +1388,61 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.9.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" + +[[package]] +name = "iri-string" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2" +dependencies = [ + "memchr", + "serde", +] [[package]] name = "itertools" -version = "0.11.0" +version = "0.13.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" dependencies = [ "either", ] [[package]] name = "itertools" -version = "0.12.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" dependencies = [ "either", ] [[package]] name = "itoa" -version = "1.0.10" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "jobserver" -version = "0.1.28" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab46a6e9526ddef3ae7f787c06f0f2600639ba80ea3eade3d8e670a2230f51d6" +checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" dependencies = [ + "getrandom 0.3.3", "libc", ] -[[package]] -name = "jpeg-decoder" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5d4a7da358eff58addd2877a45865158f0d78c911d43a5784ceb7bbf52833b0" - [[package]] name = "js-sys" -version = "0.3.69" +version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" dependencies = [ + "once_cell", "wasm-bindgen", ] @@ -1220,9 +1466,9 @@ dependencies = [ [[package]] name = "jsonwebtoken" -version = "9.2.0" +version = "9.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5c7ea04a7c5c055c175f189b6dc6ba036fd62306b58c66c9f6389036c503a3f4" +checksum = "5a87cc7a48537badeae96744432de36f4be2b4a34a05a5ef32e9dd8a1c169dde" dependencies = [ "base64", "js-sys", @@ -1235,9 +1481,9 @@ dependencies = [ [[package]] name = "konst" -version = "0.3.8" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d712a8c49d4274f8d8a5cf61368cb5f3c143d149882b1a2918129e53395fdb0" +checksum = "4381b9b00c55f251f2ebe9473aef7c117e96828def1a7cb3bd3f0f903c6894e9" dependencies = [ "const_panic", "konst_kernel", @@ -1246,57 +1492,40 @@ dependencies = [ [[package]] name = "konst_kernel" -version = "0.3.8" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dac6ea8c376b6e208a81cf39b8e82bebf49652454d98a4829e907dac16ef1790" +checksum = "e4b1eb7788f3824c629b1116a7a9060d6e898c358ebff59070093d51103dcc3c" dependencies = [ "typewit", ] [[package]] name = "lazy_static" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "lazycell" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.153" +version = "0.2.174" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" +checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" [[package]] name = "libloading" -version = "0.8.3" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" +checksum = 
"07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667" dependencies = [ "cfg-if", - "windows-targets 0.52.4", -] - -[[package]] -name = "libredox" -version = "0.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" -dependencies = [ - "bitflags 2.4.2", - "libc", - "redox_syscall", + "windows-targets 0.53.3", ] [[package]] name = "libsqlite3-sys" -version = "0.26.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc22eff61b133b115c6e8c74e818c628d6d5e7a502afea6f64dee076dd94326" +checksum = "fbb8270bb4060bd76c6e96f20c52d80620f1d82a3470885694e41e0f81ef6fe7" dependencies = [ "cc", "pkg-config", @@ -1305,9 +1534,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.15" +version = "1.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "037731f5d3aaa87a5675e895b63ddff1a87624bc29f77004ea829809654e48f6" +checksum = "8b70e7a7df205e92a1a4cd9aaae7898dac0aa555503cc0a649494d0d60e7651d" dependencies = [ "cc", "pkg-config", @@ -1321,10 +1550,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] -name = "lock_api" -version = "0.4.11" +name = "linux-raw-sys" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" + +[[package]] +name = "litemap" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" + +[[package]] +name = "lock_api" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" dependencies = [ "autocfg", "scopeguard", @@ -1332,9 +1573,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.21" +version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" [[package]] name = "lru-cache" @@ -1346,10 +1587,16 @@ dependencies = [ ] [[package]] -name = "lz4-sys" -version = "1.9.4" +name = "lru-slab" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d27b317e207b10f69f5e75494119e391a96f48861ae870d1da6edac98ca900" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + +[[package]] +name = "lz4-sys" +version = "1.11.1+lz4-1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bd8c0d6c6ed0cd30b3652886bb8711dc4bb01d637a68105a3d5158039b418e6" dependencies = [ "cc", "libc", @@ -1361,12 +1608,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" -[[package]] -name = "match_cfg" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" - [[package]] name = "matchers" version = "0.1.0" @@ -1376,12 +1617,6 @@ dependencies = [ "regex-automata 0.1.10", ] -[[package]] -name = "matches" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" - [[package]] name = "matchit" version = "0.7.3" @@ -1390,9 +1625,9 @@ checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] name = "memchr" -version = "2.7.1" +version = "2.7.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" +checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" [[package]] name = "mime" @@ -1408,32 +1643,49 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.2" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ - "adler", + "adler2", "simd-adler32", ] [[package]] name = "mio" -version = "0.8.11" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" dependencies = [ "libc", - "wasi", - "windows-sys 0.48.0", + "wasi 0.11.1+wasi-snapshot-preview1", + "windows-sys 0.59.0", +] + +[[package]] +name = "multer" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83e87776546dc87511aa5ee218730c92b666d7264ab6ed41f9d215af9cd5224b" +dependencies = [ + "bytes", + "encoding_rs", + "futures-util", + "http", + "httparse", + "memchr", + "mime", + "spin", + "version_check", ] [[package]] name = "nix" -version = "0.28.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.9.1", "cfg-if", "cfg_aliases", "libc", @@ -1449,6 +1701,12 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "normalize-line-endings" +version = "0.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" + [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -1461,11 +1719,10 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ - "autocfg", "num-integer", "num-traits", ] @@ -1487,18 +1744,18 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", ] [[package]] name = "num_cpus" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" dependencies = [ "hermit-abi", "libc", @@ -1506,108 +1763,112 @@ dependencies = [ [[package]] name = "object" -version = "0.32.2" +version = "0.36.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.19.0" +version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "openssl-probe" -version = "0.1.5" 
+version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "opentelemetry" -version = "0.18.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69d6c3d7288a106c0a363e4b0e8d308058d56902adefb16f4936f417ffef086e" +checksum = "4c365a63eec4f55b7efeceb724f1336f26a9cf3427b70e59e2cd2a5b947fba96" dependencies = [ - "opentelemetry_api", - "opentelemetry_sdk", -] - -[[package]] -name = "opentelemetry-jaeger" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e785d273968748578931e4dc3b4f5ec86b26e09d9e0d66b55adda7fce742f7a" -dependencies = [ - "async-trait", - "futures", - "futures-executor", - "once_cell", - "opentelemetry", - "opentelemetry-semantic-conventions", - "thiserror", - "thrift", - "tokio", -] - -[[package]] -name = "opentelemetry-semantic-conventions" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b02e0230abb0ab6636d18e2ba8fa02903ea63772281340ccac18e0af3ec9eeb" -dependencies = [ - "opentelemetry", -] - -[[package]] -name = "opentelemetry_api" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c24f96e21e7acc813c7a8394ee94978929db2bcc46cf6b5014fc612bf7760c22" -dependencies = [ - "fnv", - "futures-channel", - "futures-util", - "indexmap 1.9.3", + "futures-core", + "futures-sink", "js-sys", "once_cell", "pin-project-lite", - "thiserror", + "thiserror 1.0.69", +] + +[[package]] +name = "opentelemetry-jaeger-propagator" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0a68a13b92fc708d875ad659b08b35d08b8ef2403e01944b39ca21e5b08b17" +dependencies = [ + "opentelemetry", +] + +[[package]] +name = "opentelemetry-otlp" +version = 
"0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b925a602ffb916fb7421276b86756027b37ee708f9dce2dbdcc51739f07e727" +dependencies = [ + "async-trait", + "futures-core", + "http", + "opentelemetry", + "opentelemetry-proto", + "opentelemetry_sdk", + "prost", + "thiserror 1.0.69", + "tokio", + "tonic", +] + +[[package]] +name = "opentelemetry-prometheus" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc4191ce34aa274621861a7a9d68dbcf618d5b6c66b10081631b61fd81fbc015" +dependencies = [ + "once_cell", + "opentelemetry", + "opentelemetry_sdk", + "prometheus", + "protobuf", +] + +[[package]] +name = "opentelemetry-proto" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30ee9f20bff9c984511a02f082dc8ede839e4a9bf15cc2487c8d6fea5ad850d9" +dependencies = [ + "opentelemetry", + "opentelemetry_sdk", + "prost", + "tonic", ] [[package]] name = "opentelemetry_sdk" -version = "0.18.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ca41c4933371b61c2a2f214bf16931499af4ec90543604ec828f7a625c09113" +checksum = "692eac490ec80f24a17828d49b40b60f5aeaccdfe6a503f939713afd22bc28df" dependencies = [ "async-trait", - "crossbeam-channel", - "dashmap", - "fnv", "futures-channel", "futures-executor", "futures-util", + "glob", "once_cell", - "opentelemetry_api", + "opentelemetry", "percent-encoding", - "rand", - "thiserror", + "rand 0.8.5", + "serde_json", + "thiserror 1.0.69", "tokio", "tokio-stream", ] -[[package]] -name = "ordered-float" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3305af35278dd29f46fcdd139e0b1fbfae2153f0e5928b39b035542dd31e37b7" -dependencies = [ - "num-traits", -] - [[package]] name = "overload" version = "0.1.1" @@ -1616,9 +1877,9 @@ checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = 
"parking_lot" -version = "0.12.1" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" dependencies = [ "lock_api", "parking_lot_core", @@ -1626,51 +1887,33 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.9" +version = "0.9.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" +checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] -name = "paste" -version = "1.0.14" +name = "password-hash" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" - -[[package]] -name = "pear" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ccca0f6c17acc81df8e242ed473ec144cbf5c98037e69aa6d144780aad103c8" +checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" dependencies = [ - "inlinable_string", - "pear_codegen", - "yansi", -] - -[[package]] -name = "pear_codegen" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e22670e8eb757cff11d6c199ca7b987f352f0346e0be4dd23869ec72cb53c77" -dependencies = [ - "proc-macro2", - "proc-macro2-diagnostics", - "quote", - "syn 2.0.52", + "base64ct", + "rand_core 0.6.4", + "subtle", ] [[package]] name = "pem" -version = "3.0.3" +version = "3.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b8fcc794035347fb64beda2d3b462595dd2753e3f268d89c5aae77e8cf2c310" +checksum = "38af38e8470ac9dee3ce1bae1af9c1671fffc44ddfd8bd1d0a3445bf349a8ef3" 
dependencies = [ "base64", "serde", @@ -1683,46 +1926,116 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] -name = "persy" -version = "1.5.0" +name = "pest" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ef4b7250ab3a90ded0e284b2633469c23ef01ea868fe7cbb64e2f0a7d6f6d02" +checksum = "1db05f56d34358a8b1066f67cbb203ee3e7ed2ba674a6263a1d5ec6db2204323" dependencies = [ - "crc", - "data-encoding", - "fs2", - "linked-hash-map", - "rand", - "thiserror", - "unsigned-varint", - "zigzag", + "memchr", + "thiserror 2.0.12", + "ucd-trie", +] + +[[package]] +name = "pest_derive" +version = "2.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb056d9e8ea77922845ec74a1c4e8fb17e7c218cc4fc11a15c5d25e189aa40bc" +dependencies = [ + "pest", + "pest_generator", +] + +[[package]] +name = "pest_generator" +version = "2.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87e404e638f781eb3202dc82db6760c8ae8a1eeef7fb3fa8264b2ef280504966" +dependencies = [ + "pest", + "pest_meta", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pest_meta" +version = "2.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edd1101f170f5903fde0914f899bb503d9ff5271d7ba76bbb70bea63690cc0d5" +dependencies = [ + "pest", + "sha2", +] + +[[package]] +name = "phf" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" +dependencies = [ + "phf_macros", + "phf_shared", +] + +[[package]] +name = "phf_generator" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" +dependencies = [ + "phf_shared", + "rand 0.8.5", +] + +[[package]] +name = 
"phf_macros" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f84ac04429c13a7ff43785d75ad27569f2951ce0ffd30a3321230db2fc727216" +dependencies = [ + "phf_generator", + "phf_shared", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "phf_shared" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" +dependencies = [ + "siphasher", ] [[package]] name = "pin-project" -version = "1.1.5" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.5" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn", ] [[package]] name = "pin-project-lite" -version = "0.2.13" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" @@ -1742,21 +2055,15 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.30" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" - -[[package]] -name = "platforms" -version = "3.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"626dec3cac7cc0e1577a2ec3fc496277ec2baa084bebad95bb6fdbfae235f84c" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" [[package]] name = "png" -version = "0.17.13" +version = "0.17.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06e4b0d3d1312775e782c86c91a111aa1f910cbb65e1337f9975b5f9a554b5e1" +checksum = "82151a2fc869e011c153adc57cf2789ccb8d9906ce52c0b39a6b5697749d7526" dependencies = [ "bitflags 1.3.2", "crc32fast", @@ -1765,6 +2072,15 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "potential_utf" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585" +dependencies = [ + "zerovec", +] + [[package]] name = "powerfmt" version = "0.2.0" @@ -1773,57 +2089,185 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.17" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "predicates" +version = "3.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5d19ee57562043d37e82899fade9a22ebab7be9cef5026b07fda9cdd4293573" +dependencies = [ + "anstyle", + "difflib", + "float-cmp", + "normalize-line-endings", + "predicates-core", + "regex", +] + +[[package]] +name = "predicates-core" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "727e462b119fe9c93fd0eb1429a5f7647394014cf3c04ab2c0350eeb09095ffa" + +[[package]] +name = "predicates-tree" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72dd2d6d381dfb73a193c7fca536518d7caee39fc8503f74e7dc0be0531b425c" 
+dependencies = [ + "predicates-core", + "termtree", +] [[package]] name = "proc-macro-crate" -version = "2.0.2" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b00f26d3400549137f92511a46ac1cd8ce37cb5598a96d382381458b992a5d24" +checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" dependencies = [ - "toml_datetime", "toml_edit", ] [[package]] name = "proc-macro2" -version = "1.0.79" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" dependencies = [ "unicode-ident", ] [[package]] -name = "proc-macro2-diagnostics" -version = "0.10.1" +name = "prometheus" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" +checksum = "3d33c28a30771f7f96db69893f78b857f7450d7e0237e9c8fc6427a81bae7ed1" dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.52", - "version_check", - "yansi", + "cfg-if", + "fnv", + "lazy_static", + "memchr", + "parking_lot", + "protobuf", + "thiserror 1.0.69", ] [[package]] -name = "quick-error" -version = "1.2.3" +name = "prost" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-derive" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +dependencies = [ + "anyhow", + "itertools 0.14.0", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "protobuf" +version = "2.28.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" + +[[package]] +name = "proxy-header" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc1493f63ddddfba840c3169e997c2905d09538ace72d64e84af6324c6e0e065" +dependencies = [ + "pin-project-lite", + "tokio", +] + +[[package]] +name = "quinn" +version = "0.11.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "626214629cda6781b6dc1d316ba307189c85ba657213ce642d9c77670f8202c8" +dependencies = [ + "bytes", + "cfg_aliases", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls", + "socket2 0.5.10", + "thiserror 2.0.12", + "tokio", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-proto" +version = "0.11.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49df843a9161c85bb8aae55f101bc0bac8bcafd637a620d9122fd7e0b2f7422e" +dependencies = [ + "bytes", + "getrandom 0.3.3", + "lru-slab", + "rand 0.9.2", + "ring", + "rustc-hash", + "rustls", + "rustls-pki-types", + "slab", + "thiserror 2.0.12", + "tinyvec", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-udp" +version = "0.5.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcebb1209ee276352ef14ff8732e24cc2b02bbac986cd74a4c81bcb2f9881970" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2 0.5.10", + "tracing", + "windows-sys 0.59.0", +] [[package]] name = "quote" -version = "1.0.35" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" dependencies = [ "proc-macro2", ] +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + [[package]] name = "rand" version = "0.8.5" @@ -1831,8 +2275,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha", - "rand_core", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.3", ] [[package]] @@ -1842,7 +2296,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.3", ] [[package]] @@ -1851,39 +2315,37 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", + "getrandom 0.2.16", +] + +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.3", ] [[package]] name = "redox_syscall" -version = "0.4.1" +version = "0.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +checksum = "5407465600fb0548f1442edf71dd20683c6ed326200ace4b1ef0763521bb3b77" dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name 
= "redox_users" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" -dependencies = [ - "getrandom", - "libredox", - "thiserror", + "bitflags 2.9.1", ] [[package]] name = "regex" -version = "1.10.3" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.6", - "regex-syntax 0.8.2", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", ] [[package]] @@ -1897,13 +2359,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.6" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.2", + "regex-syntax 0.8.5", ] [[package]] @@ -1914,81 +2376,73 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.2" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" -version = "0.11.26" +version = "0.12.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78bf93c4af7a8bb7d879d51cebe797356ff10ae8516ace542b5182d9dcac10b2" +checksum = "cbc931937e6ca3a06e3b6c0aa7841849b160a90351d6ab467a8b9b9959767531" dependencies = [ "base64", "bytes", - "encoding_rs", "futures-core", - "futures-util", "h2", "http", "http-body", + "http-body-util", "hyper", 
"hyper-rustls", - "ipnet", + "hyper-util", "js-sys", "log", - "mime", - "once_cell", "percent-encoding", "pin-project-lite", + "quinn", "rustls", "rustls-native-certs", - "rustls-pemfile", + "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", "sync_wrapper", - "system-configuration", "tokio", "tokio-rustls", - "tokio-socks", + "tower 0.5.2", + "tower-http", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "winreg", ] [[package]] name = "resolv-conf" -version = "0.7.0" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" -dependencies = [ - "hostname", - "quick-error", -] +checksum = "95325155c684b1c89f7765e30bc1c42e4a6da51ca513615660cb8a62ef9a88e3" [[package]] name = "ring" -version = "0.17.8" +version = "0.17.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom", + "getrandom 0.2.16", "libc", - "spin", "untrusted", "windows-sys 0.52.0", ] [[package]] name = "ruma" -version = "0.9.4" -source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" +version = "0.12.2" +source = "git+https://github.com/ruma/ruma.git?rev=c4f467781a7ef330dc0b7eb5d0d0cad77ebc3337#c4f467781a7ef330dc0b7eb5d0d0cad77ebc3337" dependencies = [ "assign", "js_int", @@ -1998,16 +2452,16 @@ dependencies = [ "ruma-common", "ruma-events", "ruma-federation-api", - "ruma-identity-service-api", "ruma-push-gateway-api", "ruma-signatures", "ruma-state-res", + "web-time", ] [[package]] name = "ruma-appservice-api" -version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" +version = 
"0.12.1" +source = "git+https://github.com/ruma/ruma.git?rev=c4f467781a7ef330dc0b7eb5d0d0cad77ebc3337#c4f467781a7ef330dc0b7eb5d0d0cad77ebc3337" dependencies = [ "js_int", "ruma-common", @@ -2018,12 +2472,13 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.17.4" -source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" +version = "0.20.2" +source = "git+https://github.com/ruma/ruma.git?rev=c4f467781a7ef330dc0b7eb5d0d0cad77ebc3337#c4f467781a7ef330dc0b7eb5d0d0cad77ebc3337" dependencies = [ "as_variant", "assign", "bytes", + "date_header", "http", "js_int", "js_option", @@ -2033,30 +2488,34 @@ dependencies = [ "serde", "serde_html_form", "serde_json", + "thiserror 2.0.12", + "url", + "web-time", ] [[package]] name = "ruma-common" -version = "0.12.1" -source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" +version = "0.15.2" +source = "git+https://github.com/ruma/ruma.git?rev=c4f467781a7ef330dc0b7eb5d0d0cad77ebc3337#c4f467781a7ef330dc0b7eb5d0d0cad77ebc3337" dependencies = [ "as_variant", "base64", "bytes", "form_urlencoded", + "getrandom 0.2.16", "http", - "indexmap 2.2.5", + "indexmap 2.10.0", "js_int", "konst", "percent-encoding", - "rand", + "rand 0.8.5", "regex", "ruma-identifiers-validation", "ruma-macros", "serde", "serde_html_form", "serde_json", - "thiserror", + "thiserror 2.0.12", "time", "tracing", "url", @@ -2067,11 +2526,11 @@ dependencies = [ [[package]] name = "ruma-events" -version = "0.27.11" -source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" +version = "0.30.2" +source = "git+https://github.com/ruma/ruma.git?rev=c4f467781a7ef330dc0b7eb5d0d0cad77ebc3337#c4f467781a7ef330dc0b7eb5d0d0cad77ebc3337" dependencies = [ "as_variant", - "indexmap 2.2.5", + "indexmap 2.10.0", "js_int", "js_option", "percent-encoding", 
@@ -2081,62 +2540,63 @@ dependencies = [ "ruma-macros", "serde", "serde_json", - "thiserror", + "thiserror 2.0.12", "tracing", "url", + "web-time", "wildmatch", ] [[package]] name = "ruma-federation-api" -version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" +version = "0.11.1" +source = "git+https://github.com/ruma/ruma.git?rev=c4f467781a7ef330dc0b7eb5d0d0cad77ebc3337#c4f467781a7ef330dc0b7eb5d0d0cad77ebc3337" dependencies = [ + "bytes", + "headers", + "http", + "http-auth", + "httparse", "js_int", + "memchr", + "mime", + "rand 0.8.5", "ruma-common", "ruma-events", "serde", "serde_json", + "thiserror 2.0.12", + "tracing", ] [[package]] name = "ruma-identifiers-validation" -version = "0.9.3" -source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" +version = "0.10.1" +source = "git+https://github.com/ruma/ruma.git?rev=c4f467781a7ef330dc0b7eb5d0d0cad77ebc3337#c4f467781a7ef330dc0b7eb5d0d0cad77ebc3337" dependencies = [ "js_int", - "thiserror", -] - -[[package]] -name = "ruma-identity-service-api" -version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" -dependencies = [ - "js_int", - "ruma-common", - "serde", + "thiserror 2.0.12", ] [[package]] name = "ruma-macros" -version = "0.12.0" -source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" +version = "0.15.1" +source = "git+https://github.com/ruma/ruma.git?rev=c4f467781a7ef330dc0b7eb5d0d0cad77ebc3337#c4f467781a7ef330dc0b7eb5d0d0cad77ebc3337" dependencies = [ - "once_cell", + "cfg-if", "proc-macro-crate", "proc-macro2", "quote", "ruma-identifiers-validation", "serde", - "syn 2.0.52", + "syn", "toml", ] [[package]] name = "ruma-push-gateway-api" -version = "0.8.0" -source = 
"git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" +version = "0.11.0" +source = "git+https://github.com/ruma/ruma.git?rev=c4f467781a7ef330dc0b7eb5d0d0cad77ebc3337#c4f467781a7ef330dc0b7eb5d0d0cad77ebc3337" dependencies = [ "js_int", "ruma-common", @@ -2147,42 +2607,42 @@ dependencies = [ [[package]] name = "ruma-signatures" -version = "0.14.0" -source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" +version = "0.17.1" +source = "git+https://github.com/ruma/ruma.git?rev=c4f467781a7ef330dc0b7eb5d0d0cad77ebc3337#c4f467781a7ef330dc0b7eb5d0d0cad77ebc3337" dependencies = [ "base64", "ed25519-dalek", "pkcs8", - "rand", + "rand 0.8.5", "ruma-common", "serde_json", "sha2", "subslice", - "thiserror", + "thiserror 2.0.12", ] [[package]] name = "ruma-state-res" -version = "0.10.0" -source = "git+https://github.com/ruma/ruma?rev=5495b85aa311c2805302edb0a7de40399e22b397#5495b85aa311c2805302edb0a7de40399e22b397" +version = "0.13.0" +source = "git+https://github.com/ruma/ruma.git?rev=c4f467781a7ef330dc0b7eb5d0d0cad77ebc3337#c4f467781a7ef330dc0b7eb5d0d0cad77ebc3337" dependencies = [ - "itertools 0.11.0", "js_int", "ruma-common", "ruma-events", + "ruma-signatures", "serde", "serde_json", - "thiserror", + "thiserror 2.0.12", "tracing", ] [[package]] name = "rusqlite" -version = "0.29.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "549b9d036d571d42e6e85d1c1425e2ac83491075078ca9a15be021c56b1641f2" +checksum = "37e34486da88d8e051c7c0e23c3f15fd806ea8546260aa2fec247e97242ec143" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.9.1", "fallible-iterator", "fallible-streaming-iterator", "hashlink", @@ -2190,25 +2650,13 @@ dependencies = [ "smallvec", ] -[[package]] -name = "rust-argon2" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a5885493fdf0be6cdff808d1533ce878d21cfa49c7086fa00c66355cd9141bfc" -dependencies = [ - "base64", - "blake2b_simd", - "constant_time_eq", - "crossbeam-utils", -] - [[package]] name = "rust-librocksdb-sys" -version = "0.20.0+9.1.0" +version = "0.38.0+10.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48b14f4848d8574c074bb26445b43e63735d802ef2fc5cc40c1b015134baee0c" +checksum = "455560a01423fff351732e26cb436b4ab43805642e0783d7db2c12d372849cf7" dependencies = [ - "bindgen", + "bindgen 0.72.0", "bzip2-sys", "cc", "glob", @@ -2220,9 +2668,9 @@ dependencies = [ [[package]] name = "rust-rocksdb" -version = "0.24.0" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d36eae38b1d3d0018e273191f791343bd3eb030d7da63aaa20350e41c0182881" +checksum = "41be0c32ea0155c97e2d19c2276739a2ec17606d0b89485ca5124c4b14e2a38f" dependencies = [ "libc", "rust-librocksdb-sys", @@ -2230,87 +2678,114 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.23" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" [[package]] name = "rustc-hash" -version = "1.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" [[package]] name = "rustc_version" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ "semver", ] [[package]] -name = "rustls" -version = "0.21.10" +name = "rustix" +version = "1.0.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" +checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8" +dependencies = [ + "bitflags 2.9.1", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.60.2", +] + +[[package]] +name = "rustls" +version = "0.23.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ebcbd2f03de0fc1122ad9bb24b127a5a6cd51d72604a3f3c50ac459762b6cc" dependencies = [ "log", + "once_cell", "ring", + "rustls-pki-types", "rustls-webpki", - "sct", + "subtle", + "zeroize", ] [[package]] name = "rustls-native-certs" -version = "0.6.3" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" dependencies = [ "openssl-probe", - "rustls-pemfile", + "rustls-pki-types", "schannel", "security-framework", ] [[package]] name = "rustls-pemfile" -version = "1.0.4" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" dependencies = [ - "base64", + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" +dependencies = [ + "web-time", + "zeroize", ] [[package]] name = "rustls-webpki" -version = "0.101.7" +version = "0.103.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc" dependencies = [ "ring", + 
"rustls-pki-types", "untrusted", ] [[package]] name = "rustversion" -version = "1.0.14" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" [[package]] name = "ryu" -version = "1.0.17" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" [[package]] name = "schannel" -version = "0.1.23" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -2320,28 +2795,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] -name = "sct" -version = "0.7.1" +name = "sd-notify" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +checksum = "b943eadf71d8b69e661330cb0e2656e31040acf21ee7708e2c238a0ec6af2bf4" dependencies = [ - "ring", - "untrusted", + "libc", ] -[[package]] -name = "sd-notify" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "621e3680f3e07db4c9c2c3fb07c6223ab2fab2e54bd3c04c3ae037990f428c32" - [[package]] name = "security-framework" -version = "2.9.2" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = 
"271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.9.1", "core-foundation", "core-foundation-sys", "libc", @@ -2350,9 +2818,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.1" +version = "2.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" dependencies = [ "core-foundation-sys", "libc", @@ -2360,38 +2828,38 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.22" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" +checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" [[package]] name = "serde" -version = "1.0.197" +version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.197" +version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn", ] [[package]] name = "serde_html_form" -version = "0.2.5" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50437e6a58912eecc08865e35ea2e8d365fbb2db0debb1c8bb43bf1faf055f25" +checksum = "9d2de91cf02bbc07cde38891769ccd5d4f073d22a40683aa4bc7a95781aaa2c4" dependencies = [ "form_urlencoded", - "indexmap 2.2.5", + "indexmap 2.10.0", "itoa", "ryu", "serde", @@ 
-2399,20 +2867,21 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.114" +version = "1.0.142" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" +checksum = "030fedb782600dcbd6f02d479bf0d817ac3bb40d644745b769d6a96bc3afc5a7" dependencies = [ "itoa", + "memchr", "ryu", "serde", ] [[package]] name = "serde_path_to_error" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" +checksum = "59fab13f937fa393d08645bf3a84bdfe86e296747b506ada67bb15f10f218b2a" dependencies = [ "itoa", "serde", @@ -2420,9 +2889,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.5" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" +checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" dependencies = [ "serde", ] @@ -2441,11 +2910,11 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.32" +version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fd075d994154d4a774f95b51fb96bdc2832b0ea48425c92546073816cda1f2f" +checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.2.5", + "indexmap 2.10.0", "itoa", "ryu", "serde", @@ -2476,9 +2945,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.8" +version = "0.10.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" dependencies = [ "cfg-if", "cpufeatures", @@ -2502,9 +2971,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" 
[[package]] name = "signal-hook-registry" -version = "1.4.1" +version = "1.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = "9203b8055f63a2a00e2f593bb0510367fe707d7ff1e5c872de2f537b339e5410" dependencies = [ "libc", ] @@ -2515,7 +2984,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -2525,42 +2994,61 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" [[package]] -name = "simple_asn1" -version = "0.6.2" +name = "similar" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" +checksum = "bbbb5d9659141646ae647b42fe094daf6c6192d1620870b449d9557f748b2daa" + +[[package]] +name = "simple_asn1" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb" dependencies = [ "num-bigint", "num-traits", - "thiserror", + "thiserror 2.0.12", "time", ] [[package]] -name = "slab" -version = "0.4.9" +name = "siphasher" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] +checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" + +[[package]] +name = "slab" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" [[package]] name = "smallvec" -version = "1.13.1" +version = "1.15.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" [[package]] name = "socket2" -version = "0.5.6" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" dependencies = [ "libc", "windows-sys 0.52.0", ] +[[package]] +name = "socket2" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] + [[package]] name = "spin" version = "0.9.8" @@ -2577,6 +3065,33 @@ dependencies = [ "der", ] +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + +[[package]] +name = "strum" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "subslice" version = "0.2.3" @@ -2588,26 +3103,15 @@ dependencies = [ [[package]] name = "subtle" -version = "2.5.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" 
[[package]] name = "syn" -version = "1.0.109" +version = "2.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "syn" -version = "2.0.52" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" +checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" dependencies = [ "proc-macro2", "quote", @@ -2616,119 +3120,107 @@ dependencies = [ [[package]] name = "sync_wrapper" -version = "0.1.2" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" - -[[package]] -name = "system-configuration" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" dependencies = [ - "bitflags 1.3.2", - "core-foundation", - "system-configuration-sys", + "futures-core", ] [[package]] -name = "system-configuration-sys" -version = "0.5.0" +name = "synstructure" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ - "core-foundation-sys", - "libc", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tempfile" +version = "3.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" +dependencies = [ + "fastrand", + "getrandom 0.3.3", + "once_cell", + "rustix", + "windows-sys 0.59.0", +] + +[[package]] +name 
= "terminal_size" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45c6481c4829e4cc63825e62c49186a34538b7b2750b73b266581ffb612fb5ed" +dependencies = [ + "rustix", + "windows-sys 0.59.0", +] + +[[package]] +name = "termtree" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", ] [[package]] name = "thiserror" -version = "1.0.58" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" +checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" dependencies = [ - "thiserror-impl", + "thiserror-impl 2.0.12", ] [[package]] name = "thiserror-impl" -version = "1.0.58" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +dependencies = [ + "proc-macro2", + "quote", + "syn", ] [[package]] name = "thread_local" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" dependencies = [ "cfg-if", - 
"once_cell", -] - -[[package]] -name = "threadpool" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" -dependencies = [ - "num_cpus", -] - -[[package]] -name = "thrift" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09678c4cdbb4eed72e18b7c2af1329c69825ed16fcbac62d083fc3e2b0590ff0" -dependencies = [ - "byteorder", - "integer-encoding", - "log", - "ordered-float", - "threadpool", -] - -[[package]] -name = "tikv-jemalloc-ctl" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "619bfed27d807b54f7f776b9430d4f8060e66ee138a28632ca898584d462c31c" -dependencies = [ - "libc", - "paste", - "tikv-jemalloc-sys", -] - -[[package]] -name = "tikv-jemalloc-sys" -version = "0.5.4+5.3.0-patched" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9402443cb8fd499b6f327e40565234ff34dbda27460c5b47db0db77443dd85d1" -dependencies = [ - "cc", - "libc", -] - -[[package]] -name = "tikv-jemallocator" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "965fe0c26be5c56c94e38ba547249074803efd52adfb66de62107d95aab3eaca" -dependencies = [ - "libc", - "tikv-jemalloc-sys", ] [[package]] name = "time" -version = "0.3.34" +version = "0.3.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" +checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" dependencies = [ "deranged", "itoa", @@ -2741,25 +3233,35 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" +checksum = 
"c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" [[package]] name = "time-macros" -version = "0.2.17" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774" +checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" dependencies = [ "num-conv", "time-core", ] [[package]] -name = "tinyvec" -version = "1.6.0" +name = "tinystr" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tinyvec" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" dependencies = [ "tinyvec_macros", ] @@ -2772,60 +3274,49 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.36.0" +version = "1.47.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" +checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038" dependencies = [ "backtrace", "bytes", + "io-uring", "libc", "mio", - "num_cpus", "pin-project-lite", "signal-hook-registry", - "socket2", + "slab", + "socket2 0.6.0", "tokio-macros", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] name = "tokio-macros" -version = "2.2.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn", ] 
[[package]] name = "tokio-rustls" -version = "0.24.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" dependencies = [ "rustls", "tokio", ] -[[package]] -name = "tokio-socks" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51165dfa029d2a65969413a6cc96f354b86b464498702f174a4efa13608fd8c0" -dependencies = [ - "either", - "futures-util", - "thiserror", - "tokio", -] - [[package]] name = "tokio-stream" -version = "0.1.14" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" dependencies = [ "futures-core", "pin-project-lite", @@ -2834,23 +3325,22 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.10" +version = "0.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df" dependencies = [ "bytes", "futures-core", "futures-sink", "pin-project-lite", "tokio", - "tracing", ] [[package]] name = "toml" -version = "0.8.2" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "185d8ab0dfbb35cf1399a6344d8484209c088f75f8f68230da55d48d95d43e3d" +checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" dependencies = [ "serde", "serde_spanned", @@ -2860,26 +3350,63 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.3" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" 
+checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.20.2" +version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.2.5", + "indexmap 2.10.0", "serde", "serde_spanned", "toml_datetime", + "toml_write", "winnow", ] +[[package]] +name = "toml_write" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" + +[[package]] +name = "tonic" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" +dependencies = [ + "async-stream", + "async-trait", + "axum", + "base64", + "bytes", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-timeout", + "hyper-util", + "percent-encoding", + "pin-project", + "prost", + "socket2 0.5.10", + "tokio", + "tokio-stream", + "tower 0.4.13", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower" version = "0.4.13" @@ -2888,28 +3415,47 @@ checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", + "indexmap 1.9.3", "pin-project", "pin-project-lite", + "rand 0.8.5", + "slab", + "tokio", + "tokio-util", "tower-layer", "tower-service", "tracing", ] [[package]] -name = "tower-http" -version = "0.4.4" +name = "tower" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ - "bitflags 2.4.2", - "bytes", 
"futures-core", "futures-util", + "pin-project-lite", + "sync_wrapper", + "tokio", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-http" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" +dependencies = [ + "bitflags 2.9.1", + "bytes", + "futures-util", "http", "http-body", - "http-range-header", + "iri-string", "pin-project-lite", - "tower", + "tower 0.5.2", "tower-layer", "tower-service", "tracing", @@ -2917,23 +3463,22 @@ dependencies = [ [[package]] name = "tower-layer" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] name = "tower-service" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.40" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ - "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -2941,20 +3486,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.27" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn", ] [[package]] name = "tracing-core" 
-version = "0.1.32" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" dependencies = [ "once_cell", "valuable", @@ -2971,17 +3516,6 @@ dependencies = [ "tracing-subscriber", ] -[[package]] -name = "tracing-log" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f751112709b4e791d8ce53e32c4ed2d353565a795ce84da2285393f41557bdf2" -dependencies = [ - "log", - "once_cell", - "tracing-core", -] - [[package]] name = "tracing-log" version = "0.2.0" @@ -2995,41 +3529,58 @@ dependencies = [ [[package]] name = "tracing-opentelemetry" -version = "0.18.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21ebb87a95ea13271332df069020513ab70bdb5637ca42d6e492dc3bbbad48de" +checksum = "a9784ed4da7d921bc8df6963f8c80a0e4ce34ba6ba76668acadd3edbd985ff3b" dependencies = [ + "js-sys", "once_cell", "opentelemetry", + "opentelemetry_sdk", + "smallvec", "tracing", "tracing-core", - "tracing-log 0.1.4", + "tracing-log", "tracing-subscriber", + "web-time", +] + +[[package]] +name = "tracing-serde" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "704b1aeb7be0d0a84fc9828cae51dab5970fee5088f83d1dd7ee6f6246fc6ff1" +dependencies = [ + "serde", + "tracing-core", ] [[package]] name = "tracing-subscriber" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" dependencies = [ "matchers", "nu-ansi-term", "once_cell", "regex", + "serde", + "serde_json", "sharded-slab", "smallvec", "thread_local", "tracing", "tracing-core", - "tracing-log 0.2.0", + "tracing-log", + 
"tracing-serde", ] [[package]] name = "trust-dns-proto" -version = "0.22.0" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f7f83d1e4a0e4358ac54c5c3681e5d7da5efc5a7a632c90bb6d6669ddd9bc26" +checksum = "3119112651c157f4488931a01e586aa459736e9d6046d3bd9105ffb69352d374" dependencies = [ "async-trait", "cfg-if", @@ -3038,12 +3589,12 @@ dependencies = [ "futures-channel", "futures-io", "futures-util", - "idna 0.2.3", + "idna 0.4.0", "ipnet", - "lazy_static", - "rand", + "once_cell", + "rand 0.8.5", "smallvec", - "thiserror", + "thiserror 1.0.69", "tinyvec", "tokio", "tracing", @@ -3052,19 +3603,20 @@ dependencies = [ [[package]] name = "trust-dns-resolver" -version = "0.22.0" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aff21aa4dcefb0a1afbfac26deb0adc93888c7d295fb63ab273ef276ba2b7cfe" +checksum = "10a3e6c3aff1718b3c73e395d1f35202ba2ffa847c6a62eea0db8fb4cfe30be6" dependencies = [ "cfg-if", "futures-util", "ipconfig", - "lazy_static", "lru-cache", + "once_cell", "parking_lot", + "rand 0.8.5", "resolv-conf", "smallvec", - "thiserror", + "thiserror 1.0.69", "tokio", "tracing", "trust-dns-proto", @@ -3078,15 +3630,15 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "typenum" -version = "1.17.0" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" [[package]] name = "typewit" -version = "1.9.0" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6fb9ae6a3cafaf0a5d14c2302ca525f9ae8e07a0f0e6949de88d882c37a6e24" +checksum = "97e72ba082eeb9da9dc68ff5a2bf727ef6ce362556e8d29ec1aed3bd05e7d86a" dependencies = [ "typewit_proc_macros", ] @@ -3098,46 +3650,37 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e36a83ea2b3c704935a01b4642946aadd445cea40b10935e3f8bd8052b8193d6" [[package]] -name = "uncased" -version = "0.9.10" +name = "ucd-trie" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1b88fcfe09e89d3866a5c11019378088af2d24c3fbd4f0543f96b479ec90697" -dependencies = [ - "version_check", -] +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" [[package]] name = "unicode-bidi" -version = "0.3.15" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" +checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5" [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] [[package]] name = "unsafe-libyaml" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" - -[[package]] -name = "unsigned-varint" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb066959b24b5196ae73cb057f45598450d2c5f71460e98c49b738086eff9c06" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" [[package]] name = "untrusted" @@ -3147,29 +3690,44 @@ checksum = 
"8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.0" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", - "idna 0.5.0", + "idna 1.0.3", "percent-encoding", + "serde", ] [[package]] -name = "uuid" -version = "1.7.0" +name = "utf8-width" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f00cc9702ca12d3c81455259621e676d0f7251cec66a21e98fe2e9a37db93b2a" +checksum = "86bd8d4e895da8537e5315b8254664e6b769c4ff3db18321b297a1e7004392e3" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "uuid" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d" dependencies = [ - "getrandom", + "getrandom 0.3.3", + "js-sys", + "wasm-bindgen", ] [[package]] name = "valuable" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" [[package]] name = "vcpkg" @@ -3179,9 +3737,18 @@ checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "version_check" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "wait-timeout" 
+version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11" +dependencies = [ + "libc", +] [[package]] name = "want" @@ -3194,52 +3761,63 @@ dependencies = [ [[package]] name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" +version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasi" +version = "0.14.2+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +dependencies = [ + "wit-bindgen-rt", +] [[package]] name = "wasm-bindgen" -version = "0.2.92" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" dependencies = [ "cfg-if", + "once_cell", + "rustversion", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.92" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" dependencies = [ "bumpalo", "log", - "once_cell", "proc-macro2", "quote", - "syn 2.0.52", + "syn", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.42" +version = "0.4.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" dependencies = [ "cfg-if", "js-sys", + 
"once_cell", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.92" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3247,28 +3825,31 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.92" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.92" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] [[package]] name = "web-sys" -version = "0.3.69" +version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" dependencies = [ "js-sys", "wasm-bindgen", @@ -3286,21 +3867,21 @@ dependencies = [ [[package]] name = "weezl" -version = "0.1.8" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53a85b86a771b1c87058196170769dd264f66c0782acf1ae6cc51bfd64b39082" +checksum = "a751b3277700db47d3e574514de2eced5e54dc8a5436a3bf7a0b248b2cee16f3" [[package]] name = "widestring" -version = "1.0.2" +version = "1.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" +checksum = "dd7cf3379ca1aac9eea11fba24fd7e315d621f8dfe35c8d7d2be8b793726e07d" [[package]] name = "wildmatch" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "017f0a8ed8331210d91b7a4c30d4edef8f21a65c02f2540496e2e79725f6d8a8" +checksum = "68ce1ab1f8c62655ebe1350f589c61e505cf94d385bc6a12899442d9081e71fd" [[package]] name = "winapi" @@ -3324,6 +3905,12 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows-link" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" + [[package]] name = "windows-sys" version = "0.48.0" @@ -3339,7 +3926,25 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.4", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.3", ] [[package]] @@ -3359,17 +3964,35 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.4" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.4", - "windows_aarch64_msvc 0.52.4", - "windows_i686_gnu 0.52.4", - "windows_i686_msvc 0.52.4", - "windows_x86_64_gnu 0.52.4", - "windows_x86_64_gnullvm 0.52.4", - "windows_x86_64_msvc 0.52.4", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", ] [[package]] @@ -3380,9 +4003,15 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.4" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" [[package]] name = "windows_aarch64_msvc" @@ -3392,9 +4021,15 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.4" 
+version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" [[package]] name = "windows_i686_gnu" @@ -3404,9 +4039,27 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.4" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" [[package]] name = "windows_i686_msvc" @@ -3416,9 +4069,15 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.4" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = 
"windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" [[package]] name = "windows_x86_64_gnu" @@ -3428,9 +4087,15 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.4" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" [[package]] name = "windows_x86_64_gnullvm" @@ -3440,9 +4105,15 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.4" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" [[package]] name = "windows_x86_64_msvc" @@ -3452,15 +4123,21 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.4" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "windows_x86_64_msvc" 
+version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" [[package]] name = "winnow" -version = "0.5.40" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" +checksum = "f3edebf492c8125044983378ecb5766203ad3b4c2f7a922bd7dd207f6d443e95" dependencies = [ "memchr", ] @@ -3476,52 +4153,152 @@ dependencies = [ ] [[package]] -name = "yansi" -version = "1.0.1" +name = "wit-bindgen-rt" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" +dependencies = [ + "bitflags 2.9.1", +] + +[[package]] +name = "writeable" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" + +[[package]] +name = "xdg" +version = "2.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213b7324336b53d2414b2db8537e56544d981803139155afa84f76eeebb7a546" + +[[package]] +name = "yoke" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] [[package]] name = "zerocopy" -version = "0.7.32" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.32" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", ] [[package]] name = "zeroize" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" [[package]] -name = "zigzag" -version = "0.1.0" +name = "zerotrie" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70b40401a28d86ce16a330b863b86fd7dbee4d7c940587ab09ab8c019f9e3fdf" +checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" dependencies = [ - "num-traits", + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428" +dependencies = [ + "yoke", + "zerofrom", + 
"zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" +dependencies = [ + "proc-macro2", + "quote", + "syn", ] [[package]] name = "zstd-sys" -version = "2.0.9+zstd.1.5.5" +version = "2.0.15+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" +checksum = "eb81183ddd97d0c74cedf1d50d85c8d08c1b8b68ee863bdee9e706eedba1a237" dependencies = [ + "bindgen 0.71.1", "cc", "pkg-config", ] + +[[package]] +name = "zune-core" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f423a2c17029964870cfaabb1f13dfab7d092a62a29a89264f4d36990ca414a" + +[[package]] +name = "zune-jpeg" +version = "0.4.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc1f7e205ce79eb2da3cd71c5f55f3589785cb7c79f6a03d1c8d1491bda5d089" +dependencies = [ + "zune-core", +] diff --git a/Cargo.toml b/Cargo.toml index 4dfc04f3..3c722659 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,188 +1,194 @@ # Keep alphabetically sorted [workspace.lints.rust] +elided_lifetimes_in_paths = "warn" explicit_outlives_requirements = "warn" +macro_use_extern_crate = "warn" +missing_abi = "warn" +noop_method_call = "warn" +single_use_lifetimes = "warn" +unreachable_pub = "warn" +unsafe_op_in_unsafe_fn = "warn" +unused_extern_crates = "warn" +unused_import_braces = "warn" +unused_lifetimes = "warn" +unused_macro_rules = "warn" unused_qualifications = "warn" -# Keep alphabetically sorted [workspace.lints.clippy] -cloned_instead_of_copied = "warn" +# Groups. Keep alphabetically sorted +pedantic = { level = "warn", priority = -1 } + +# Lints. 
Keep alphabetically sorted +as_conversions = "warn" +assertions_on_result_states = "warn" dbg_macro = "warn" +default_union_representation = "warn" +deref_by_slicing = "warn" +empty_drop = "warn" +empty_structs_with_brackets = "warn" +error_impl_error = "warn" +filetype_is_file = "warn" +float_cmp_const = "warn" +format_push_string = "warn" +get_unwrap = "warn" +if_then_some_else_none = "warn" +impl_trait_in_params = "warn" +let_underscore_must_use = "warn" +lossy_float_literal = "warn" +mem_forget = "warn" +missing_assert_message = "warn" +mod_module_files = "warn" +multiple_inherent_impl = "warn" +mutex_atomic = "warn" +negative_feature_names = "warn" +non_zero_suggestions = "warn" +pub_without_shorthand = "warn" +rc_buffer = "warn" +rc_mutex = "warn" +redundant_feature_names = "warn" +redundant_type_annotations = "warn" +ref_patterns = "warn" +rest_pat_in_fully_bound_structs = "warn" +same_name_method = "warn" +semicolon_inside_block = "warn" str_to_string = "warn" +string_add = "warn" +string_lit_chars_any = "warn" +string_slice = "warn" +string_to_string = "warn" +suspicious_xor_used_as_pow = "warn" +tests_outside_test_module = "warn" +try_err = "warn" +undocumented_unsafe_blocks = "warn" +unnecessary_safety_comment = "warn" +unnecessary_safety_doc = "warn" +unnecessary_self_imports = "warn" +unneeded_field_pattern = "warn" +unseparated_literal_suffix = "warn" +unused_result_ok = "warn" +verbose_file_reads = "warn" +wildcard_dependencies = "warn" + +# TODO: Remove these: +missing_errors_doc = "allow" +missing_panics_doc = "allow" [package] -name = "conduit" +name = "grapevine" description = "A Matrix homeserver written in Rust" license = "Apache-2.0" -authors = ["timokoesters "] -homepage = "https://conduit.rs" -repository = "https://gitlab.com/famedly/conduit" -readme = "README.md" -version = "0.7.0" +version = "0.1.0" edition = "2021" # See also `rust-toolchain.toml` -rust-version = "1.75.0" - -# See more keys and their definitions at 
https://doc.rust-lang.org/cargo/reference/manifest.html +rust-version = "1.85.0" [lints] workspace = true +# Keep sorted [dependencies] -# Web framework -axum = { version = "0.6.18", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true } -axum-server = { version = "0.5.1", features = ["tls-rustls"] } -tower = { version = "0.4.13", features = ["util"] } -tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitive-headers", "trace", "util"] } - -# Used for matrix spec type definitions and helpers -#ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "5495b85aa311c2805302edb0a7de40399e22b397", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } -#ruma = { git = "https://github.com/timokoesters/ruma", rev = "4ec9c69bb7e09391add2382b3ebac97b6e8f4c64", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } -#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-msc3575", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } - -# Async runtime and utilities -tokio = { version = "1.28.1", features = ["fs", "macros", "signal", "sync"] } -# Used for storing data permanently -#sled = { version = "0.34.7", features = ["compression", "no_metrics"], optional = true } -#sled = { git = 
"https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } -persy = { version = "1.4.4", optional = true, features = ["background_ops"] } - -# Used for the http request / response body type for Ruma endpoints used with reqwest -bytes = "1.4.0" -http = "0.2.9" -# Used to find data directory for default db path -directories = "4.0.1" -# Used for ruma wrapper -serde_json = { version = "1.0.96", features = ["raw_value"] } -# Used for appservice registration files -serde_yaml = "0.9.21" -# Used for pdu definition -serde = { version = "1.0.163", features = ["rc"] } -# Used for secure identifiers -rand = "0.8.5" -# Used to hash passwords -rust-argon2 = "1.0.0" -# Used to send requests -hyper = "0.14.26" -reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls-native-roots", "socks"] } -# Used for conduit::Error type -thiserror = "1.0.40" -# Used to generate thumbnails for images -image = { version = "0.24.6", default-features = false, features = ["jpeg", "png", "gif"] } -# Used to encode server public key -base64 = "0.21.2" -# Used when hashing the state -ring = "0.17.7" -# Used when querying the SRV record of other servers -trust-dns-resolver = "0.22.0" -# Used to find matching events for appservices -regex = "1.8.1" -# jwt jsonwebtokens -jsonwebtoken = "9.2.0" -# Performance measurements -tracing = { version = "0.1.37", features = [] } -tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } -tracing-flame = "0.2.0" -opentelemetry = { version = "0.18.0", features = ["rt-tokio"] } -opentelemetry-jaeger = { version = "0.17.0", features = ["rt-tokio"] } -tracing-opentelemetry = "0.18.0" -lru-cache = "0.1.2" -rusqlite = { version = "0.29.0", optional = true, features = ["bundled"] } -parking_lot = { version = "0.12.1", optional = true } -# crossbeam = { version = "0.8.2", optional = true } -num_cpus = "1.15.0" -threadpool = "1.8.1" -# heed = { git = 
"https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } -# Used for ruma wrapper -serde_html_form = "0.2.0" - -thread_local = "1.1.7" -# used for TURN server authentication +argon2 = "0.5.3" +async-trait = "0.1.88" +axum = { version = "0.7.9", default-features = false, features = ["form", "http1", "http2", "json", "matched-path", "tokio", "tracing"] } +axum-extra = { version = "0.9.6", features = ["typed-header"] } +axum-server = { git = "https://gitlab.computer.surgery/matrix/thirdparty/axum-server.git", rev = "v0.7.2+grapevine-1", version = "0.7.2", features = ["tls-rustls-no-provider"] } +base64 = "0.22.1" +bytes = "1.10.1" +clap = { version = "4.5.42", default-features = false, features = ["std", "derive", "help", "usage", "error-context", "string", "wrap_help"] } +futures-util = { version = "0.3.31", default-features = false } hmac = "0.12.1" +html-escape = "0.2.13" +http = "1.3.1" +http-body-util = "0.1.3" +hyper = "1.6.0" +image = { version = "0.25.6", default-features = false, features = ["jpeg", "png", "gif"] } +jsonwebtoken = "9.3.1" +lru-cache = "0.1.2" +num_cpus = "1.17.0" +opentelemetry = "0.24.0" +opentelemetry-jaeger-propagator = "0.3.0" +opentelemetry-otlp = "0.17.0" +opentelemetry-prometheus = "0.17.0" +opentelemetry_sdk = { version = "0.24.1", features = ["rt-tokio"] } +parking_lot = { version = "0.12.4", optional = true } +phf = { version = "0.11.3", features = ["macros"] } +pin-project-lite = "0.2.16" +prometheus = "0.13.4" +proxy-header = { version = "0.1.2", features = ["tokio"] } +rand = "0.8.5" +regex = "1.11.1" +reqwest = { version = "0.12.22", default-features = false, features = ["http2", "rustls-tls-native-roots", "socks"] } +ring = "0.17.14" +rocksdb = { package = "rust-rocksdb", version = "0.42.1", features = ["lz4", "multi-threaded-cf", "zstd"], optional = true } +rusqlite = { version = "0.34.0", optional = true, features = ["bundled"] } +rustls = { version = "0.23.31", 
default-features = false, features = ["ring", "log", "logging", "std", "tls12"] } +sd-notify = { version = "0.4.5", optional = true } +serde = { version = "1.0.219", features = ["rc"] } +serde_html_form = "0.2.7" +serde_json = { version = "1.0.141", features = ["raw_value"] } +serde_yaml = "0.9.34" sha-1 = "0.10.1" -# used for conduit's CLI and admin room command parsing -clap = { version = "4.3.0", default-features = false, features = ["std", "derive", "help", "usage", "error-context", "string"] } -futures-util = { version = "0.3.28", default-features = false } -# Used for reading the configuration from conduit.toml & environment variables -figment = { version = "0.10.8", features = ["env", "toml"] } +strum = { version = "0.27.2", features = ["derive"] } +thiserror = "2.0.12" +thread_local = "1.1.9" +tokio = { version = "1.47.0", features = ["fs", "macros", "signal", "sync"] } +toml = "0.8.23" +tower = { version = "0.5.2", features = ["util"] } +tower-http = { version = "0.6.6", features = ["add-extension", "cors", "sensitive-headers", "trace", "util"] } +tracing = { version = "0.1.41", features = [] } +tracing-flame = "0.2.0" +tracing-opentelemetry = "0.25.0" +tracing-subscriber = { version = "0.3.19", features = ["env-filter", "json"] } +trust-dns-resolver = "0.23.2" +xdg = "2.5.2" -tikv-jemalloc-ctl = { version = "0.5.0", features = ["use_std"], optional = true } -tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_supported_platforms"], optional = true } -lazy_static = "1.4.0" -async-trait = "0.1.68" - -sd-notify = { version = "0.4.1", optional = true } - -[dependencies.rocksdb] -package = "rust-rocksdb" -version = "0.24.0" -optional = true +[dependencies.ruma] +git = "https://github.com/ruma/ruma.git" +rev = "c4f467781a7ef330dc0b7eb5d0d0cad77ebc3337" features = [ - "multi-threaded-cf", - "zstd", - "lz4", + "compat-server-signing-key-version", + "compat-empty-string-null", + "compat-null", + "compat-optional", + "compat-unset-avatar", + 
"compat-get-3pids", + "compat-tag-info", + "compat-optional-txn-pdus", + + "rand", + "appservice-api-c", + "client-api", + "federation-api", + "push-gateway-api-c", + "state-res", + "unstable-msc2448", + "ring-compat", ] [target.'cfg(unix)'.dependencies] -nix = { version = "0.28", features = ["resource"] } +nix = { version = "0.29", features = ["resource", "time"] } + +[dev-dependencies] +assert_cmd = "2.0.17" +insta = { version = "1.43.1", features = ["filters", "json", "redactions"] } +predicates = "3.1.3" +tempfile = "3.20.0" + +[profile.dev.package.insta] +opt-level = 3 + +[profile.dev.package.similar] +opt-level = 3 [features] -default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "systemd"] -#backend_sled = ["sled"] -backend_persy = ["persy", "parking_lot"] -backend_sqlite = ["sqlite"] -#backend_heed = ["heed", "crossbeam"] -backend_rocksdb = ["rocksdb"] -jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"] -sqlite = ["rusqlite", "parking_lot", "tokio/signal"] -conduit_bin = ["axum"] -systemd = ["sd-notify"] +default = ["rocksdb", "sqlite", "systemd"] -[[bin]] -name = "conduit" -path = "src/main.rs" -required-features = ["conduit_bin"] +# Keep sorted +rocksdb = ["dep:rocksdb"] +sqlite = ["dep:rusqlite", "dep:parking_lot", "tokio/signal"] +systemd = ["dep:sd-notify"] -[lib] -name = "conduit" -path = "src/lib.rs" - -[package.metadata.deb] -name = "matrix-conduit" -maintainer = "Paul van Tilburg " -copyright = "2020, Timo Kösters " -license-file = ["LICENSE", "3"] -depends = "$auto, ca-certificates" -extended-description = """\ -A fast Matrix homeserver that is optimized for smaller, personal servers, \ -instead of a server that has high scalability.""" -section = "net" -priority = "optional" -assets = [ - ["debian/README.md", "usr/share/doc/matrix-conduit/README.Debian", "644"], - ["README.md", "usr/share/doc/matrix-conduit/", "644"], - ["target/release/conduit", "usr/sbin/matrix-conduit", "755"], -] -conf-files = [ - 
"/etc/matrix-conduit/conduit.toml" -] -maintainer-scripts = "debian/" -systemd-units = { unit-name = "matrix-conduit" } - -[profile.dev] -lto = 'off' -incremental = true - -[profile.release] -lto = 'thin' -incremental = true -codegen-units=32 -# If you want to make flamegraphs, enable debug info: -# debug = true - -# For releases also try to max optimizations for dependencies: -[profile.release.build-override] -opt-level = 3 -[profile.release.package."*"] -opt-level = 3 +[profile.release-debug] +inherits = "release" +debug = true diff --git a/LICENSE b/LICENSE deleted file mode 100644 index d9a10c0d..00000000 --- a/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS diff --git a/LICENSE.md b/LICENSE.md new file mode 100644 index 00000000..6f0d0bab --- /dev/null +++ b/LICENSE.md @@ -0,0 +1,172 @@ +# Apache License + +Version 2.0, January 2004, + +## TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +### 1. Definitions + +* **"License"** shall mean the terms and conditions for use, reproduction, and + distribution as defined by Sections 1 through 9 of this document. + +* **"Licensor"** shall mean the copyright owner or entity authorized by the + copyright owner that is granting the License. 
+ +* **"Legal Entity"** shall mean the union of the acting entity and all other + entities that control, are controlled by, or are under common control with + that entity. For the purposes of this definition, "control" means (i) the + power, direct or indirect, to cause the direction or management of such + entity, whether by contract or otherwise, or (ii) ownership of fifty percent + (50%) or more of the outstanding shares, or (iii) beneficial ownership of such + entity. + +* **"You"** (or **"Your"**) shall mean an individual or Legal Entity exercising + permissions granted by this License. + +* **"Source"** form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation source, and + configuration files. + +* **"Object"** form shall mean any form resulting from mechanical transformation + or translation of a Source form, including but not limited to compiled object + code, generated documentation, and conversions to other media types. + +* **"Work"** shall mean the work of authorship, whether in Source or Object + form, made available under the License, as indicated by a copyright notice + that is included in or attached to the work (an example is provided in the + Appendix below). + +* **"Derivative Works"** shall mean any work, whether in Source or Object form, + that is based on (or derived from) the Work and for which the editorial + revisions, annotations, elaborations, or other modifications represent, as + a whole, an original work of authorship. For the purposes of this License, + Derivative Works shall not include works that remain separable from, or merely + link (or bind by name) to the interfaces of, the Work and Derivative Works + thereof. 
+ +* **"Contribution"** shall mean any work of authorship, including the original + version of the Work and any modifications or additions to that Work or + Derivative Works thereof, that is intentionally submitted to Licensor for + inclusion in the Work by the copyright owner or by an individual or Legal + Entity authorized to submit on behalf of the copyright owner. For the purposes + of this definition, "submitted" means any form of electronic, verbal, or + written communication sent to the Licensor or its representatives, including + but not limited to communication on electronic mailing lists, source code + control systems, and issue tracking systems that are managed by, or on behalf + of, the Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise designated + in writing by the copyright owner as "Not a Contribution." + +* **"Contributor"** shall mean Licensor and any individual or Legal Entity on + behalf of whom a Contribution has been received by Licensor and subsequently + incorporated within the Work. + +### 2. Grant of Copyright License + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +### 3. 
Grant of Patent License + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including +a cross-claim or counterclaim in a lawsuit) alleging that the Work or +a Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +### 4. Redistribution + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +1. You must give any other recipients of the Work or Derivative Works a copy of + this License; and + +2. You must cause any modified files to carry prominent notices stating that You + changed the files; and + +3. You must retain, in the Source form of any Derivative Works that You + distribute, all copyright, patent, trademark, and attribution notices from + the Source form of the Work, excluding those notices that do not pertain to + any part of the Derivative Works; and + +4. 
If the Work includes a "NOTICE" text file as part of its distribution, then + any Derivative Works that You distribute must include a readable copy of the + attribution notices contained within such NOTICE file, excluding those + notices that do not pertain to any part of the Derivative Works, in at least + one of the following places: within a NOTICE text file distributed as part of + the Derivative Works; within the Source form or documentation, if provided + along with the Derivative Works; or, within a display generated by the + Derivative Works, if and wherever such third-party notices normally appear. + The contents of the NOTICE file are for informational purposes only and do + not modify the License. You may add Your own attribution notices within + Derivative Works that You distribute, alongside or as an addendum to the + NOTICE text from the Work, provided that such additional attribution notices + cannot be construed as modifying the License. + +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +### 5. Submission of Contributions + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +### 6. 
Trademarks + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +### 7. Disclaimer of Warranty + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +### 8. Limitation of Liability + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +### 9. Accepting Warranty or Additional Liability + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. 
However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. diff --git a/README.md b/README.md index 474a5247..ae3f9a70 100644 --- a/README.md +++ b/README.md @@ -1,77 +1,9 @@ -# Conduit +# Grapevine - -### A Matrix homeserver written in Rust - +A Matrix homeserver. -Please visit the [Conduit documentation](https://famedly.gitlab.io/conduit) for more information. -Alternatively you can open [docs/introduction.md](docs/introduction.md) in this repository. +## Read the book - -#### What is Matrix? +[Click here to read the latest version.][0] -[Matrix](https://matrix.org) is an open network for secure and decentralized -communication. Users from every Matrix homeserver can chat with users from all -other Matrix servers. You can even use bridges (also called Matrix appservices) -to communicate with users outside of Matrix, like a community on Discord. - -#### What is the goal? - -An efficient Matrix homeserver that's easy to set up and just works. You can install -it on a mini-computer like the Raspberry Pi to host Matrix for your family, -friends or company. - -#### Can I try it out? - -Yes! You can test our Conduit instance by opening a client that supports registration tokens such as [Element web](https://app.element.io/), [Nheko](https://matrix.org/ecosystem/clients/nheko/) or [SchildiChat web](https://app.schildi.chat/) and registering on the `conduit.rs` homeserver. The registration token is "for_testing_only". Don't share personal information. Once you have registered, you can use any other [Matrix client](https://matrix.org/ecosystem/clients) to login. - -Server hosting for conduit.rs is donated by the Matrix.org Foundation. 
- -#### What is the current status? - -Conduit is Beta, meaning you can join and participate in most -Matrix rooms, but not all features are supported and you might run into bugs -from time to time. - -There are still a few important features missing: - -- E2EE emoji comparison over federation (E2EE chat works) -- Outgoing read receipts, typing, presence over federation (incoming works) - - - -#### How can I contribute? - -1. Look for an issue you would like to work on and make sure no one else is currently working on it. -2. Tell us that you are working on the issue (comment on the issue or chat in - [#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org)). If it is more complicated, please explain your approach and ask questions. -3. Fork the repo, create a new branch and push commits. -4. Submit a MR - -#### Contact - -If you have any questions, feel free to -- Ask in `#conduit:fachschaften.org` on Matrix -- Write an E-Mail to `conduit@koesters.xyz` -- Send an direct message to `@timokoesters:fachschaften.org` on Matrix -- [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new) - -#### Thanks to - -Thanks to FUTO, Famedly, Prototype Fund (DLR and German BMBF) and all individuals for financially supporting this project. 
- -Thanks to the contributors to Conduit and all libraries we use, for example: - -- Ruma: A clean library for the Matrix Spec in Rust -- axum: A modular web framework - -#### Donate - -- Liberapay: -- Bitcoin: `bc1qnnykf986tw49ur7wx9rpw2tevpsztvar5x8w4n` - -#### Logo - -- Lightning Bolt Logo: -- Logo License: - +[0]: https://grapevine.computer.surgery/ diff --git a/bin/complement b/bin/complement deleted file mode 100755 index 291953dd..00000000 --- a/bin/complement +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -# Path to Complement's source code -COMPLEMENT_SRC="$1" - -# A `.jsonl` file to write test logs to -LOG_FILE="$2" - -# A `.jsonl` file to write test results to -RESULTS_FILE="$3" - -OCI_IMAGE="complement-conduit:dev" - -env \ - -C "$(git rev-parse --show-toplevel)" \ - docker build \ - --tag "$OCI_IMAGE" \ - --file complement/Dockerfile \ - . - -# It's okay (likely, even) that `go test` exits nonzero -set +o pipefail -env \ - -C "$COMPLEMENT_SRC" \ - COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \ - go test -json ./tests | tee "$LOG_FILE" -set -o pipefail - -# Post-process the results into an easy-to-compare format -cat "$LOG_FILE" | jq -c ' - select( - (.Action == "pass" or .Action == "fail" or .Action == "skip") - and .Test != null - ) | {Action: .Action, Test: .Test} - ' | sort > "$RESULTS_FILE" diff --git a/bin/job b/bin/job new file mode 100755 index 00000000..3726f92d --- /dev/null +++ b/bin/job @@ -0,0 +1,131 @@ +#!/usr/bin/env bash + +job_artifacts() ( + set -euo pipefail + + nix-build-and-cache packages + + # Subsequent `nix build` calls should be fast because the above line ensures + # the packages have been built already. + + packages=( + static-aarch64-unknown-linux-musl + static-x86_64-unknown-linux-musl + ) + + for x in "${packages[@]}"; do + nix build ".#$x" + cp result/bin/grapevine grapevine-"$x" + chmod u+w grapevine-"$x" + done +) + +job_ci() ( + set -euo pipefail + + nix-build-and-cache ci + direnv exec . 
engage +) + +job_pages() ( + set -euo pipefail + + nix build .#website-root + cp --recursive --dereference result public + chmod u+w -R public +) + +job_cache-ci-deps() ( + set -euo pipefail + + nix-build-and-cache ci +) + +bail() ( + set -euo pipefail + + git show --shortstat + echo + echo "Failure caused by the above commit" + exit 1 +) + +mark_commit_passed() ( + set -euo pipefail + + mkdir -p ".gitlab-ci.d/passed/$1" + touch ".gitlab-ci.d/passed/$1/$(git rev-parse HEAD)" +) + +commit_passed() ( + set -euo pipefail + + [[ -f ".gitlab-ci.d/passed/$1/$(git rev-parse HEAD)" ]] +) + +contains() ( + set -euo pipefail + + local -n xs=$1 + + for x in "${xs[@]}"; do + if [[ "$x" == "$2" ]]; then + return 0 + else + return 1 + fi + done +) + +# Jobs that should only run on the latest commit rather than since the branch's +# base. +last_commit_only=( + artifacts +) + +run() ( + set -euo pipefail + + if [[ -z "${1+x}" ]]; then + echo "You must supply a job to run. Available jobs:" + declare -F | rg \ + --only-matching \ + --color never \ + --replace '* $1' \ + '^declare -f job_(.*)$' + + exit 1 + fi + + job="$1" + + cd "$(git rev-parse --show-toplevel)" + + if \ + [[ -z "${CI_MERGE_REQUEST_DIFF_BASE_SHA+x}" ]] \ + || contains last_commit_only "$job" + then + echo "Running against latest commit only..." + + job_"$job" || bail + else + echo "Running against all commits since this branch's base..." 
+ + readarray -t commits < \ + <(git rev-list --reverse "$CI_MERGE_REQUEST_DIFF_BASE_SHA..HEAD") + for commit in "${commits[@]}"; do + git checkout "$commit" + + if commit_passed "$job"; then + echo "Skipping commit because it already passed: $commit" + continue + fi + + job_"$job" || bail + + mark_commit_passed "$job" + done + fi +) + +run "$@" diff --git a/bin/nix-build-and-cache b/bin/nix-build-and-cache index 350e1717..f9eaee1a 100755 --- a/bin/nix-build-and-cache +++ b/bin/nix-build-and-cache @@ -2,25 +2,69 @@ set -euo pipefail -# The first argument must be the desired installable -INSTALLABLE="$1" +# Build and cache the specified arguments +just() { + if command -v nom &> /dev/null; then + nom build "$@" + else + nix build "$@" + fi -# Build the installable and forward any other arguments too -nix build "$@" + # Find all output paths of the installables and their build dependencies + readarray -t derivations < <(nix path-info --derivation "$@") + readarray -t upload_paths < <( + xargs \ + nix-store --query --requisites --include-outputs \ + <<< "${derivations[*]}" + ) -if [ ! -z ${ATTIC_TOKEN+x} ]; then - nix run --inputs-from . attic -- \ - login \ - conduit \ - "${ATTIC_ENDPOINT:-https://nix.computer.surgery/conduit}" \ - "$ATTIC_TOKEN" + echo "Found ${#upload_paths[@]} paths to upload" - # Push the target installable and its build dependencies - nix run --inputs-from . attic -- \ - push \ - conduit \ - "$(nix path-info "$INSTALLABLE" --derivation)" \ - "$(nix path-info "$INSTALLABLE")" -else - echo "\$ATTIC_TOKEN is unset, skipping uploading to the binary cache" -fi + if [ -z ${ATTIC_TOKEN+x} ]; then + echo "\$ATTIC_TOKEN is unset, skipping uploading to the binary cache" + return + fi + + nix run --inputs-from . attic#default -- \ + login "$ATTIC_SERVER" "$ATTIC_ENDPOINT" "$ATTIC_TOKEN" + + # Upload them to Attic. It seems to insist on newlines to separate the + # paths. + ( + IFS=$'\n' + nix run --inputs-from . 
attic#default -- \ + push --stdin --no-closure "$ATTIC_SERVER:$ATTIC_CACHE" \ + <<< "${upload_paths[*]}" + ) +} + +# Build and cache things needed for CI +ci() { + installables=( + --inputs-from . + + # Keep sorted + ".#devShells.x86_64-linux.default" + attic#default + nixpkgs#direnv + nixpkgs#jq + ) + + just "${installables[@]}" +} + +# Build and cache all the package outputs +packages() { + readarray -t installables < <( + nix flake show --json 2> /dev/null | + nix run --inputs-from . nixpkgs#jq -- \ + -r \ + '.packages."x86_64-linux" | keys | map(".#" + .) | .[]' + ) + + just "${installables[@]}" +} + +pushd "$(git rev-parse --show-toplevel)" > /dev/null +"$@" +popd > /dev/null diff --git a/book.toml b/book.toml index e25746ca..eb6c2bdf 100644 --- a/book.toml +++ b/book.toml @@ -1,18 +1,12 @@ [book] -title = "Conduit" -description = "Conduit is a simple, fast and reliable chat server for the Matrix protocol" +title = "Grapevine" language = "en" multilingual = false -src = "docs" +src = "book" [build] -build-dir = "public" -create-missing = true +build-dir = "target/book" [output.html] -git-repository-url = "https://gitlab.com/famedly/conduit" -edit-url-template = "https://gitlab.com/famedly/conduit/-/edit/next/{path}" git-repository-icon = "fa-git-square" - -[output.html.search] -limit-results = 15 +git-repository-url = "https://gitlab.computer.surgery/matrix/grapevine" diff --git a/book/SUMMARY.md b/book/SUMMARY.md new file mode 100644 index 00000000..b1317642 --- /dev/null +++ b/book/SUMMARY.md @@ -0,0 +1,12 @@ +# Summary + +* [Introduction](./introduction.md) +* [Code of conduct](./code-of-conduct.md) +* [Changelog](./changelog.md) +* [Installing](./installing.md) + * [Supported targets](./installing/supported-targets.md) + * [Migrating to/from Conduit](./installing/migrating-conduit.md) + * [Migrating to/from Conduwuit](./installing/migrating-conduwuit.md) +* [Contributing](./contributing.md) + * [Coordinated vulnerability 
disclosure](./contributing/security.md) + * [Style guide](./contributing/style-guide.md) diff --git a/book/changelog.md b/book/changelog.md new file mode 100644 index 00000000..9614862f --- /dev/null +++ b/book/changelog.md @@ -0,0 +1,340 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog][keep-a-changelog], and this project +adheres to [Semantic Versioning][semver]. + +[keep-a-changelog]: https://keepachangelog.com/en/1.0.0/ +[semver]: https://semver.org/spec/v2.0.0.html + + + +## Unreleased + + + +This will be the first release of Grapevine since it was forked from Conduit +0.7.0. + +### Security + +1. Prevent XSS via user-uploaded media. + ([!8](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/8)) +2. Switch from incorrect, hand-rolled `X-Matrix` `Authorization` parser to the + much better implementation provided by Ruma. + ([!31](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/31)) + * This is not practically exploitable to our knowledge, but this change does + reduce risk. +3. Switch to a more trustworthy password hashing library. + ([!29](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/29)) + * This is not practically exploitable to our knowledge, but this change does + reduce risk. +4. Don't return redacted events from the search endpoint. + ([!41 (f74043d)](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/41/diffs?commit_id=f74043df9aa59b406b5086c2e9fa2791a31aa41b), + [!41 (83cdc9c)](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/41/diffs?commit_id=83cdc9c708cd7b50fe1ab40ea6a68dcf252c190b)) +5. Prevent impersonation in EDUs. + ([!41 (da99b07)](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/41/diffs?commit_id=da99b0706e683a2d347768efe5b50676abdf7b44)) + * `m.signing_key_update` was not affected by this bug. +6. 
Verify PDUs and transactions against the temporally-correct signing keys. + ([!41 (9087da9)](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/41/diffs?commit_id=9087da91db8585f34d026a48ba8fdf64865ba14d)) +7. Only allow the admin bot to change the room ID that the admin room alias + points to. + ([!42](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/42)) + +### Removed + +1. Remove update checker. + ([17a0b34](https://gitlab.computer.surgery/matrix/grapevine/-/commit/17a0b3430934fbb8370066ee9dc3506102c5b3f6)) +2. Remove optional automatic display name emoji for newly registered users. + ([cddf699](https://gitlab.computer.surgery/matrix/grapevine/-/commit/cddf6991f280008b5af5acfab6a9719bb0cfb7f1)) +3. Remove admin room welcome message on first startup. + ([c9945f6](https://gitlab.computer.surgery/matrix/grapevine/-/commit/c9945f6bbac6e22af6cf955cfa99826d4b04fe8c)) +4. Remove incomplete presence implementation. + ([f27941d](https://gitlab.computer.surgery/matrix/grapevine/-/commit/f27941d5108acda250921c6a58499a46568fd030)) +5. Remove Debian packaging. + ([d41f0fb](https://gitlab.computer.surgery/matrix/grapevine/-/commit/d41f0fbf72dae6562358173f425d23bb0e174ca2)) +6. Remove Docker packaging. + ([!48](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/48)) +7. **BREAKING:** Remove unstable room versions. + ([!59](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/59)) +8. Remove `memory-usage`, `clear-database-caches`, and `clear-service-caches` + admin commands. + ([!123](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/123)) + * The `memory-usage` command wasn't particularly useful since it can't + actually give you an accurate value in bytes and isn't supported on all + database backends. + * The latter two commands had poor UX and didn't have any noticeable effect on + memory consumption. +9. 
**BREAKING:** Remove the `global.conduit_cache_capacity_modifier` and + `global.pdu_cache_capacity` configuration options. + ([!124](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/124)) + * Instead, it is now possible to configure each cache capacity individually. +10. Remove jemalloc support. + ([!93](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/193)) +11. Removed support for MSC3575 (sliding sync), which has been closed. + ([!198](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/198)) + +### Changed + +1. **BREAKING:** Rename Conduit to Grapevine. + ([360e020](https://gitlab.computer.surgery/matrix/grapevine/-/commit/360e020b644bd012ed438708b661a25fbd124f68)) + * The `CONDUIT_VERSION_EXTRA` build-time environment variable has been + renamed to `GRAPEVINE_VERSION_EXTRA`. This change only affects distribution + packagers or non-Nix users who are building from source. If you fall into + one of those categories *and* were explicitly setting this environment + variable, make sure to change its name before building Grapevine. +2. **BREAKING:** Change the default port from 8000 to 6167. + ([f205280](https://gitlab.computer.surgery/matrix/grapevine/-/commit/f2052805201f0685d850592b1c96f4861c58fb22)) + * If you relied on the default port being 8000, either update your other + configuration to use the new port, or explicitly configure Grapevine's port + to 8000. +3. Improve tracing spans and events. 
+ ([!11 (a275db3)](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/11/diffs?commit_id=a275db3847b8d5aaa0c651a686c19cfbf9fdb8b5) + (merged as [5172f66](https://gitlab.computer.surgery/matrix/grapevine/-/commit/5172f66c1a90e0e97b67be2897ae59fbc00208a4)), + [!11 (a275db3)](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/11/diffs?commit_id=a275db3847b8d5aaa0c651a686c19cfbf9fdb8b5) + (merged as [5172f66](https://gitlab.computer.surgery/matrix/grapevine/-/commit/5172f66c1a90e0e97b67be2897ae59fbc00208a4)), + [!11 (f556fce)](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/11/diffs?commit_id=f556fce73eb7beec2ed7b1781df0acdf47920d9c) + (merged as [ac42e0b](https://gitlab.computer.surgery/matrix/grapevine/-/commit/ac42e0bfff6af8677636a3dc1a56701a3255071d)), + [!18](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/18), + [!26](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/26), + [!50](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/50), + [!52](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/52), + [!54](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/54), + [!56](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/56), + [!69](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/69), + [!102](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/102), + [!127](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/127), + [!141](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/141)) +4. Stop returning unnecessary member counts from `/_matrix/client/{r0,v3}/sync`. + ([!12](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/12)) +5. **BREAKING:** Allow federation by default. 
+ ([!24](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/24)) + * If you relied on federation being disabled by default, make sure to + explicitly disable it before upgrading. +6. **BREAKING:** Remove the `[global]` section from the configuration file. + ([!38](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/38)) + * Details on how to migrate can be found in the merge request's description. +7. **BREAKING:** Allow specifying multiple transport listeners in the + configuration file. + ([!39](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/39)) + * Details on how to migrate can be found in the merge request's description. +8. Increase default log level so that span information is included. + ([!50](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/50)) +9. **BREAKING:** Reorganize config into sections. + ([!49](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/49)) + * Details on how to migrate can be found in the merge request's description. +10. Try to generate thumbnails for remote media ourselves if the federation + thumbnail request fails. + ([!58](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/58)) +11. **BREAKING:** Disable unauthenticated access to media by default. Use + `media.allow_unauthenticated_access` to configure this behavior. + ([!103](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/103), + [!140](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/140)) +12. **BREAKING:** Split CLI into multiple subcommands. The CLI invocation to run + the server is now behind the `serve` command, so `grapevine --config ...` + becomes `grapevine serve --config ...`. + ([!108](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/108)) +13. **BREAKING:** The path to media files is now specified separately from the + database path. 
+ ([!140](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/140), + [!170](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/170)) +14. Use trust-dns for all DNS queries, instead of only for SRV records and SRV + record targets in server discovery. + ([!156](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/156)) + +### Fixed + +1. Fix questionable numeric conversions. + ([71c48f6](https://gitlab.computer.surgery/matrix/grapevine/-/commit/71c48f66c4922813c2dc30b7b875200e06ce4b75)) +2. Stop sending no-longer-valid cached responses from the + `/_matrix/client/{r0,v3}/sync` endpoints. + ([!7](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/7)) +3. Stop returning extra E2EE device updates from `/_matrix/client/{r0,v3}/sync` + as that violates the specification. + ([!12](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/12)) +4. Make certain membership state transitions work correctly again. + ([!16](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/16)) + * For example, it was previously impossible to unban users from rooms. +5. Ensure that `tracing-flame` flushes all its data before the process exits. + ([!20 (263edcc)](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/20/diffs?commit_id=263edcc8a127ad2a541a3bb6ad35a8a459ea5616)) +6. Reduce the likelihood of locking up the async runtime. + ([!19](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/19)) +7. Fix search results not including subsequent pages in certain situations. + ([!35 (0cdf032)](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/35/diffs?commit_id=0cdf03288ab8fa363c313bd929c8b5183d14ab77)) +8. Fix search results missing events in subsequent pages in certain situations. + ([!35 (3551a6e)](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/35/diffs?commit_id=3551a6ef7a29219b9b30f50a7e8c92b92debcdcf)) +9. 
Only process admin commands if the admin bot is in the admin room. + ([!43](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/43)) +10. Fix bug where invalid account data from a client could prevent a user from + joining any upgraded rooms and brick rooms that affected users attempted to + upgrade. + ([!53](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/53)) +11. Fix bug where unexpected keys were deleted from `m.direct` account data + events when joining an upgraded room. + ([!53](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/53)) +12. Fixed appservice users not receiving federated invites if the local server + isn't already resident in the room + ([!80](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/80)) +13. Fix bug where, if a server has multiple public keys, only one would be fetched. + ([!78](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/78)) +14. Fix bug where expired keys may not be re-fetched in some scenarios. + ([!78](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/78)) +15. Fix bug where signing keys would not be fetched when joining a room if we + hadn't previously seen any signing keys from that server. + ([!87](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/87)) +16. Fixed bug + ([#48](https://gitlab.computer.surgery/matrix/grapevine/-/issues/48)) + that caused us to attempt to fetch our own signing keys from ourselves over + federation, and fail ("Won't send federation request to ourselves"). + ([!96](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/96)) +17. Fixed incoming HTTP/2 requests failing federation signature check. + ([!104](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/104)) +18. Return 403 instead of 500 when joins to a local-only room are denied. 
+ Consequently fixes Heisenbridge being unable to join puppeted users to its + rooms ([#85](https://gitlab.computer.surgery/matrix/grapevine/-/issues/85)). + ([!127](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/127)) +19. Fix handling of v11 rooms with `m.room.create` event content that passes + the authorization rules but doesn't match other parts of the spec. + ([!139](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/139)) +20. Fix tiebreaking comparisons between events during state resolution. This + will reduce the rate at which servers disagree about the state of rooms. + ([!141](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/141)) +21. Fix bug where the backoff state for remote device key queries was not reset + after a successful request, causing an increasing rate of key query failures + over time until a server restart. + ([!149](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/149)) +22. Fix bug where remote key queries that were skipped because the target server + was in backoff would increment the backoff delay further, leading to a + positive feedback loop. + ([!149](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/149)) +23. Return 504 M_NOT_YET_UPLOADED instead of 500 M_UNKNOWN when a media file is + present in the database but the contents are missing in the filesystem. + Removing media from the filesystem was the only way to delete media before + [!99](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/99), + so this situation is common. + ([!55](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/55)) + ([!153](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/153)) +24. 
Return 400 M_BAD_ALIAS from + [PUT /_matrix/client/v3/rooms/{roomId}/state/{eventType}/{stateKey}](https://spec.matrix.org/latest/client-server-api/#put_matrixclientv3roomsroomidstateeventtypestatekey) + instead of 400 M_FORBIDDEN when trying to set a canonical alias that does + not exist. + ([!158](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/158)) +25. Validate schema of new `m.room.canonical_alias` event sent by clients, + rather than silently allowing any contents if the event can't be parsed. + ([!158](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/158)) +26. Only validate canonical aliases that are new, rather than + revalidating every alias. This makes it possible to add/remove aliases when + some of the existing aliases cannot be validated. + ([!158](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/158)) +27. Fix read receipts not being sent over federation (or only arbitrarily late) + ([!162](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/162)) +28. Fix bug where ban reasons would be ignored when the banned user already had + a member event in the room. + ([!185](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/185)) +29. Stop stripping unknown properties from redaction events before sending them + to clients. + ([!191](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/191)) + +### Added + +1. Add various conveniences for users of the Nix package. + ([51f9650](https://gitlab.computer.surgery/matrix/grapevine/-/commit/51f9650ca7bc9378690d331192c85fea3c151b58), + [bbb1a6f](https://gitlab.computer.surgery/matrix/grapevine/-/commit/bbb1a6fea45b16e8d4f94c1afbf7fa22c9281f37)) +2. Add a NixOS module. + ([33e7a46](https://gitlab.computer.surgery/matrix/grapevine/-/commit/33e7a46b5385ea9035c9d13c6775d63e5626a4c7)) +3. Add a Conduit compat mode. 
+ ([a25f2ec](https://gitlab.computer.surgery/matrix/grapevine/-/commit/a25f2ec95045c5620c98eead88197a0bf13e6bb3)) + * **BREAKING:** If you're migrating from Conduit, this option must be enabled + or else your homeserver will refuse to start. +4. Include `GRAPEVINE_VERSION_EXTRA` information in the + `/_matrix/federation/v1/version` endpoint. + ([509b70b](https://gitlab.computer.surgery/matrix/grapevine/-/commit/509b70bd827fec23b88e223b57e0df3b42cede34)) +5. Allow multiple tracing subscribers to be active at once. + ([!20 (7a154f74)](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/20/diffs?commit_id=7a154f74166c1309ca5752149e02bbe44cd91431)) +6. Allow configuring the filter for `tracing-flame`. + ([!20 (507de06)](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/20/diffs?commit_id=507de063f53f52e0cf8e2c1a67215a5ad87bb35a)) +7. Collect HTTP response time metrics via OpenTelemetry and optionally expose + them as Prometheus metrics. This functionality is disabled by default. + ([!22](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/22)) +8. Collect metrics for lookup results (e.g. cache hits/misses). + ([!15](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/15), + [!36](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/36)) +9. Add configuration options for controlling the log format and colors. + ([!46](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/46)) +10. Recognize the `!admin` prefix to invoke admin commands. + ([!45](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/45)) +11. Add the `tracing-filter` admin command to view and change log/metrics/flame + filters dynamically at runtime. + ([!49](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/49), + [!164](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/164)) +12. Add more configuration options. 
+ ([!49](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/49)) + * `observability.traces.filter`: The `tracing` filter to use for + OpenTelemetry traces. + * `observability.traces.endpoint`: Where OpenTelemetry should send traces. + * `observability.flame.filter`: The `tracing` filter for `tracing-flame`. + * `observability.flame.filename`: Where `tracing-flame` will write its + output. + * `observability.logs.timestamp`: Whether timestamps should be included in + the logs. +13. Support building nix packages without IFD + ([!73](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/73)) +14. Report local users getting banned in the server logs and admin room. + ([!65](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/65), + [!84](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/84)) +15. Added support for Authenticated Media ([MSC3916](https://github.com/matrix-org/matrix-spec-proposals/pull/3916)). + ([!58](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/58), + [!111](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/111)) +16. **BREAKING:** Added support for configuring and serving + `/.well-known/matrix/...` data. + ([!90](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/90), + [!94](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/94)) + * The `server_discovery.client.base_url` option is now required. +17. Added support for configuring old verify/signing keys in config (`federation.old_verify_keys`) + ([!96](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/96)) +18. Added admin commands to delete media + ([!99](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/99), + [!102](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/102), + [!148](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/148)) +19. Allow configuring the served API components per listener. 
+ ([!109](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/109)) +20. Include the [`traceresponse` header](https://w3c.github.io/trace-context/#traceresponse-header) + if OpenTelemetry Tracing is in use. + ([!112](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/112)) +21. Sending SIGHUP to the grapevine process now reloads TLS certificates from + disk. + ([!97](https://gitlab.computer.surgery/matrix/grapevine-fork/-/merge_requests/97)) +22. Added a federation self-test, performed automatically on startup. + ([!106](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/106)) +23. Added support for HAProxy [proxy protocol](http://www.haproxy.org/download/3.0/doc/proxy-protocol.txt) + listeners. + ([!97](https://gitlab.computer.surgery/matrix/grapevine-fork/-/merge_requests/97)) +24. Add a `check-config` CLI subcommand to check whether the configuration file + is valid. + ([!121](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/121)) +25. Add configuration options to tune the value of each cache individually. + ([!124](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/124)) +26. Allow adding canonical aliases from remote servers. + ([!158](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/158)) +27. Grapevine now sends a User-Agent header on outbound requests + ([!189](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/189)) +28. Added the ability to listen on Unix sockets + ([!187](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/187)) +29. 
Added the ability to allow invalid TLS certificates + ([!203](https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/203)) diff --git a/book/code-of-conduct.md b/book/code-of-conduct.md new file mode 100644 index 00000000..80239ce9 --- /dev/null +++ b/book/code-of-conduct.md @@ -0,0 +1,12 @@ +# Code of conduct + +We follow the [Rust Code of Conduct][rust-coc] with some extra points: + +* In the absence of evidence to suggest otherwise, assume good faith when + engaging with others +* Moderation actions may be taken for behavior observed outside of + project-specific spaces +* We have limited patience, so violations may skip the warning and directly + result in a ban + +[rust-coc]: https://www.rust-lang.org/policies/code-of-conduct diff --git a/book/contributing.md b/book/contributing.md new file mode 100644 index 00000000..286cc3c3 --- /dev/null +++ b/book/contributing.md @@ -0,0 +1,15 @@ +# Contributing + +Instructions for getting GitLab access can be found on the [sign-in][sign-in] +page. + +GitLab access is primarily useful if you'd like to open issues, engage in +discussions on issues or merge requests, or submit your own merge requests. + +Note that if the sign-up process is too much trouble and you'd just +like to report an issue, feel free to report it in the Matrix room at +[#grapevine:computer.surgery][room]; someone with GitLab access can open an +issue on your behalf. 
+ +[room]: https://matrix.to/#/#grapevine:computer.surgery +[sign-in]: https://gitlab.computer.surgery/users/sign_in diff --git a/book/contributing/security.md b/book/contributing/security.md new file mode 100644 index 00000000..f54e0567 --- /dev/null +++ b/book/contributing/security.md @@ -0,0 +1,15 @@ +# Coordinated vulnerability disclosure + +If you find a security vulnerability in Grapevine, please privately report it to +the Grapevine maintainers in one of the following ways: + +* Open a GitLab issue that's marked as confidential +* Create a private, invite-only, E2EE Matrix room and invite the following + users: + * `@charles:computer.surgery` + * `@olivia:computer.surgery` + * `@xiretza:xiretza.xyz` + +If the maintainers determine that the vulnerability is shared with Conduit or +other forks, we'll work with their teams to ensure that all affected projects +can release a fix at the same time. diff --git a/book/contributing/style-guide.md b/book/contributing/style-guide.md new file mode 100644 index 00000000..957a13a1 --- /dev/null +++ b/book/contributing/style-guide.md @@ -0,0 +1,148 @@ +# Style guide + +It is recommended that contributors follow this guide to minimize nitpicking on +their merge requests. + +However, this guide is not set in stone. It is open to changes as new patterns +emerge, requirements change, compelling arguments are made, and so on. The goal +is to document the existing style so it can be applied consistently, not to +ensure the style never changes. + +## Merge requests + +When updating a merge request branch, use `git rebase`; do not create merge +commits to merge other branches into a merge request branch. + +**Why?** This keeps the history simpler, and lacking merge commits makes it +easier to revert any individual commit. + +## Commit messages + +[Here's a good article][git-commit] on how to write good commit messages in +general. + +Specifically for this project: + +* Capitalizing the first letter is not required. 
+* It is recommended to avoid "conventional commits", as they take away from the + very limited subject line length, and will not be processed in an automated + fashion anyway. + +**Why?** The linked article explains why this is good practice. + +[git-commit]: https://cbea.ms/git-commit/ + +## Structuring commits + +Try to structure each commit so that it falls into one of the following +categories: + +* Refactoring, with no behavior change. +* Changing behavior, with no refactoring. +* Removing behavior, with no refactoring. +* Adding behavior, with no refactoring. +* Rewriting something completely. It is rare that these kinds of commits are + warranted. + +If you find yourself wanting to use the word "and" in the commit's subject line, +it should probably be broken into multiple commits. + +During code review, it's common to get feedback requesting changes to your +commits. To apply this feedback, do not make and push a new commit containing +the requested change. Instead, include the requested change in the commit of +yours that gave rise to the suggestion. If you are unfamiliar with rewriting +history in git, [this website][git-rebase] is a great tutorial. + +**Why?** Small, targeted, and well-explained commits make it easier for +reviewers to verify that a change has its intended effect. Or, for someone +running `git bisect` to find a more granular answer to why their test began +failing. + +[git-rebase]: https://git-rebase.io/ + +## `mod`/`use` order + +`mod` and `use` statements should appear in the following order, separated by +a blank line: + +1. `use` statements referring to `std`, `alloc`, or `core`, if any. +2. `use` statements referring to other crates, if any. +3. `use` statements referring to `super` or `crate`, if any. +4. Macro definitions that need to be accessible from child modules, if any. +5. `mod` statements, if any. +6. `use` statements referring to modules declared by the above `mod` statements, + if any. 
+ +`rust-analyzer` and `rustfmt` automate most of this except points 4 and 5. + +**Why?** Consistency is good. + +## Testing + +When writing tests, be sure to keep the contents of [this article][cargo-test] +in mind. Especially, keeping Cargo unit tests in a dedicated tests file +(mentioned towards the end of the article). + +**Why?** The linked article explains why this is good practice. + +[cargo-test]: https://matklad.github.io/2021/02/27/delete-cargo-integration-tests.html + +## Tracing + +`tracing` events should: + +1. Start with a capital letter (when applicable). + +`tracing` events should not: + +1. End with punctuation. +2. Interpolate values into the event's message. + * Instead, add those values as structured fields. + +**Why?** Consistency is good. Also, interpolating values into the event message +essentially defeats the point of structured logging. + +### Examples + +#### 1 + +```rust,ignore +// This does not conform because it does not start with a capital letter. +info!("started pentametric fan"); + +// Do this instead: +info!("Started pentametric fan"); +``` + +#### 2 + +```rust,ignore +// This does not conform because it ends with punctuation. +info!("Started pentametric fan."); + +// Do this instead: +info!("Started pentametric fan"); +``` + +#### 3 + +```rust,ignore +// This does not conform because it interpolates values into the event's +// message. +warn!("Noticed {} discombobulated waneshafts", count); + +// Do this instead: +warn!(count, "Noticed discombobulated waneshafts"); +``` + +## Services + +Services are abstraction units that live inside the `src/service` directory. + +Calling service constructors must not cause side effects, with a few exceptions: + +* Database reads. +* Local filesystem reads. + +**Why?** This restriction makes it possible to implement subcommands that run +"offline" that reuse service code. 
diff --git a/book/installing.md b/book/installing.md new file mode 100644 index 00000000..a784ed3e --- /dev/null +++ b/book/installing.md @@ -0,0 +1,7 @@ +# Installing + +This chapter will explain how to start running a Grapevine instance for the +first time. + +**Note:** Pre-built binaries can be found in the [**Supported +targets**](./installing/supported-targets.md) subchapter. diff --git a/book/installing/migrating-conduit.md b/book/installing/migrating-conduit.md new file mode 100644 index 00000000..42cde3aa --- /dev/null +++ b/book/installing/migrating-conduit.md @@ -0,0 +1,118 @@ +# Migrating to/from Conduit + +Before migrating a Conduit instance to Grapevine, make sure to read through +all of the breaking changes listed in [the changelog](../changelog.md). + +In order to migrate an existing Conduit instance to/from Grapevine, the +Grapevine config must include `conduit_compat = true`. This parameter cannot +currently be modified after creating the database for the first time, so make +sure to set it when creating a fresh Grapevine instance that you may want to +migrate to a different implementation in the future. + +## Config + +Grapevine includes several breaking changes to the config schema. We don't +currently have docs on how to migrate an existing config. All breaking config +changes are mentioned in [the changelog](../changelog.md), so the best current +option is to read through those. Feel free to ask for config migration help in +[#grapevine:computer.surgery][room] if something is unclear. + +We plan to add [a config migration tool][config-migration-issue] to support +automatically migrating existing configs to the new schema. + +[room]: https://matrix.to/#/#grapevine:computer.surgery +[config-migration-issue]: https://gitlab.computer.surgery/matrix/grapevine/-/issues/38 + +## Filesystem + +Grapevine requires database data and media data to live in **separate**, +**non-nested** directories, which are configurable. 
Here is a typical example, +starting with the filesystem structure: + +```text +/var/lib/grapevine ++ database/ +| + database-file-1 +| + ... +| + database-file-n ++ media/ + + media-file-1 + + ... + + media-file-n +``` + +And here is the matching configuration: + +```toml +[database] +path = "/var/lib/grapevine/database" + +[media.backend] +type = "filesystem" +path = "/var/lib/grapevine/media" +``` + +On the other hand, Conduit's filesystem layout looks like this: + +```text +/var/lib/conduit ++ media/ +| + media-file-1 +| + ... +| + media-file-n ++ database-file-1 ++ ... ++ database-file-n +``` + +Which **nests** the media directory inside the database directory. Grapevine +will reject this layout, so the filesystem layout must be changed before +starting Grapevine. It is important to migrate the filesystem layout before +starting Grapevine, because otherwise it will create a fresh database instead of +using the existing one. + +## Database + +Grapevine is currently compatible with the Conduit 0.7.0 database format. It is +still possible to migrate to or from some newer Conduit versions, but it may +require manual intervention or break some functionality. + +We plan to add [a migration tool][db-compatibility-mr] to support cleanly +migrating to or from Conduit versions we are not internally compatible with. + +| Is migrating from | to | workable? | +|-|-|-| +| Conduit <=0.8.0 | Grapevine | Yes | +| Conduit 0.9.0 | Grapevine | [Yes, with caveats](#conduit-090-to-grapevine) | +| Grapevine | Conduit 0.7.0 | Yes | +| Grapevine | Conduit 0.8.0 | [Yes, with caveats](#grapevine-to-conduit-080-or-090) | +| Grapevine | Conduit 0.9.0 | [Yes, with caveats](#grapevine-to-conduit-080-or-090) | + +[db-compatibility-mr]: https://gitlab.computer.surgery/matrix/grapevine/-/merge_requests/85 + +### Conduit 0.9.0 to Grapevine + +Conduit 0.9.0 includes [a database migration][conduit-db-16-migration] that +modifies data that Grapevine doesn't read. 
Grapevine does not currently +recognize the new database schema version, and will fail to start against +a Conduit 0.9.0 database. Grapevine can start and run without issues if the +version recorded in the database is rolled back from 16 to 13. It is possible to +do this by editing the database manually, or by modifying Grapevine to change +the version. [This patch][conduit-db-16-patch] is an example of the latter +approach. + +[conduit-db-16-migration]: https://gitlab.com/famedly/conduit/-/blob/f8d7ef04e664580e882bac852877b68e7bd3ab1e/src/database/mod.rs#L945 +[conduit-db-16-patch]: https://gitlab.computer.surgery/matrix/grapevine/-/commit/fdaa30f0d670c6f04f4e6be5d193f9146d179d95 + +### Grapevine to Conduit 0.8.0 or 0.9.0 + +Conduit 0.8.0 added [a new database table][alias_userid-commit] to track which +users created each room alias. Grapevine does not write to this table, so it is +not possible to delete aliases created in Grapevine through the normal +client-server API after migrating to Conduit 0.8.0. It is possible to delete +aliases with the `remove-alias` admin command. Note that this issue also applies +to migrations from Conduit <0.8.0 to Conduit 0.8.0. + +There are no additional known issues when migrating to Conduit 0.9.0. + +[alias_userid-commit]: https://gitlab.com/famedly/conduit/-/commit/144d548ef739324ca97db12e8cada60ca3e43e09 diff --git a/book/installing/migrating-conduwuit.md b/book/installing/migrating-conduwuit.md new file mode 100644 index 00000000..7530f532 --- /dev/null +++ b/book/installing/migrating-conduwuit.md @@ -0,0 +1,6 @@ +# Migrating to/from Conduwuit + +Current Conduwuit is explicitly incompatible with the Conduit/Grapevine +database format. Some older versions have been migrated to Grapevine +successfully, but we haven't evaluated which versions it is safe to migrate +from yet. Try this at your own risk, and *definitely* take a backup first. 
diff --git a/book/installing/supported-targets.md b/book/installing/supported-targets.md new file mode 100644 index 00000000..b099dd1d --- /dev/null +++ b/book/installing/supported-targets.md @@ -0,0 +1,37 @@ +# Supported targets + + + +| Architecture | Vendor | OS | libc | Linkage | Tier | Availability[^1], [^2] | +|-|-|-|-|-|-|-| +| aarch64 | unknown | linux | musl | static | 2 | Nix, [Download](https://gitlab.computer.surgery/api/v4/projects/matrix%2Fgrapevine/jobs/artifacts/main/raw/grapevine-static-aarch64-unknown-linux-musl?job=artifacts) | +| x86_64 | unknown | linux | glibc | dynamic | 1 | Nix | +| x86_64 | unknown | linux | musl | static | 2 | Nix, [Download](https://gitlab.computer.surgery/api/v4/projects/matrix%2Fgrapevine/jobs/artifacts/main/raw/grapevine-static-x86_64-unknown-linux-musl?job=artifacts) | + +[^1]: All download links refer to the latest build of the `main` branch. +[^2]: All targets can theoretically also be built from source without Nix. + However, this may require spending several hours debugging build systems. + +## Target tiers + +The "Tier" column for each target indicates the level of support that target +has. Below is an explanation of what each tier means. + +### Tier 1 + +Tier 1 targets are guaranteed to: + +* Build +* Pass the test suite + +### Tier 2 + +Tier 2 targets are guaranteed to: + +* Build + +## Unsupported targets + +Targets that don't appear in the table at the top of this page are unsupported. +At any given time, such targets may or may not build, and may or may not pass +the test suite. diff --git a/book/introduction.md b/book/introduction.md new file mode 100644 index 00000000..bc38b0f5 --- /dev/null +++ b/book/introduction.md @@ -0,0 +1,110 @@ +# Introduction + +Grapevine is a [Matrix][matrix] homeserver that was originally forked from +[Conduit 0.7.0][conduit]. 
+ +[matrix]: https://matrix.org/ +[conduit]: https://gitlab.com/famedly/conduit/-/tree/v0.7.0?ref_type=tags + +## Chat with us + +Currently, the Matrix room at [#grapevine:computer.surgery][room] serves +multiple purposes: + +* General discussion about the project, such as answering questions about it +* Reporting issues with Grapevine, if getting GitLab access is too much trouble + for you +* Providing support to users running Grapevine +* Discussing the development of Grapevine + +If you'd like to engage in or observe any of those things, please join! + +[room]: https://matrix.to/#/#grapevine:computer.surgery + +## Can I use it? + +Theoretically yes, but it's not ready for general use yet, because: + +* [There aren't any releases][releases-issue]. +* [There isn't very much user-facing documentation][docs-issue]. +* There have been several additions and breaking changes to the configuration + file format that haven't been documented in detail. This means you'll need to + read the source code to figure out what all the options are and what they do. + +If these issues don't scare you away, go for it! (And if you use NixOS, [here's +an example][nixos-example].) + +[docs-issue]: https://gitlab.computer.surgery/matrix/grapevine/-/issues/21 +[releases-issue]: https://gitlab.computer.surgery/matrix/grapevine/-/issues/18 +[nixos-example]: https://gitlab.computer.surgery/charles/servy-fleet/-/blob/main/config/grapevine/default.nix + +## Expectations management + +This project is run and maintained entirely by volunteers who are doing their +best. Additionally, due to our goals, the development of new features may be +slower than alternatives. We find this to be an acceptable tradeoff considering +the importance of the reliability of a project like this. + +## Goals + +Our goal is to provide a robust and reliable Matrix homeserver implementation. 
+In order to accomplish this, we aim to do the following: + +* Optimize for maintainability +* Implement automated testing to ensure correctness +* Improve instrumentation to provide real-world data to aid decision-making + +## Non-goals + +We also have some things we specifically want to avoid as we feel they inhibit +our ability to accomplish our goals: + +* macOS or Windows support + * These operating systems are very uncommon in the hobbyist server space, and + we feel our effort is better spent elsewhere. +* Docker support + * Docker tends to generate a high volume of support requests that are solely + due to Docker itself or how users are using Docker. In an attempt to mitigate + this, we will not provide first-party Docker images. Instead, we'd recommend + avoiding Docker and either using our pre-built statically-linked binaries + or building from source. However, if your deployment mechanism *requires* + Docker, it should be straightforward to build your own Docker image. +* Configuration via environment variables + * Environment variables restrict the options for structuring configuration and + support for them would increase the maintenance burden. If your deployment + mechanism requires this, consider using an external tool like + [`envsubst`][envsubst]. +* Configuration compatibility with Conduit + * To provide a secure and ergonomic configuration experience, breaking changes + are required. However, [we do intend to provide a migration tool to ease + migration][migration-tool]. +* Perfect database compatibility with Conduit + * [This issue tracks the database compatibility status][db-compat]. In the + long run, it's inevitable that changes will be made to Conduit that we won't + want to pull in, or that we need to make changes that Conduit won't want to + pull in. 
+ +[envsubst]: https://github.com/a8m/envsubst +[migration-tool]: https://gitlab.computer.surgery/matrix/grapevine/-/issues/38 +[db-compat]: https://gitlab.computer.surgery/matrix/grapevine/-/issues/17 + +## Project management + +The project's current maintainers[^1] are: + +| Matrix username | GitLab username | +|-|-| +| `@charles:computer.surgery` | `charles` | +| `@olivia:computer.surgery` | `olivia` | +| `@xiretza:xiretza.xyz` | `Lambda` | + +We would like to expand this list in the future as social trust is built and +technical competence is demonstrated by other contributors. + +We require at least 1 approving code review from a maintainer[^2] before changes +can be merged. This number may increase in the future as the list of maintainers +grows. + +[^1]: A "maintainer" is someone who has the ability to close issues opened by + someone else and merge changes. +[^2]: A maintainer approving their own change doesn't count. diff --git a/book/migration.md b/book/migration.md new file mode 100644 index 00000000..432f4e47 --- /dev/null +++ b/book/migration.md @@ -0,0 +1 @@ +# Migration to/from Conduit diff --git a/clippy.toml b/clippy.toml new file mode 100644 index 00000000..a6330574 --- /dev/null +++ b/clippy.toml @@ -0,0 +1,4 @@ +doc-valid-idents = [ + "SemVer", + "..", +] diff --git a/complement/Dockerfile b/complement/Dockerfile deleted file mode 100644 index 813af10e..00000000 --- a/complement/Dockerfile +++ /dev/null @@ -1,45 +0,0 @@ -FROM rust:1.75.0 - -WORKDIR /workdir - -RUN apt-get update && apt-get install -y --no-install-recommends \ - libclang-dev - -COPY Cargo.toml Cargo.toml -COPY Cargo.lock Cargo.lock -COPY src src -RUN cargo build --release \ - && mv target/release/conduit conduit \ - && rm -rf target - -# Install caddy -RUN apt-get update \ - && apt-get install -y \ - debian-keyring \ - debian-archive-keyring \ - apt-transport-https \ - curl \ - && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/gpg.key' \ - | gpg --dearmor -o 
/usr/share/keyrings/caddy-testing-archive-keyring.gpg \ - && curl -1sLf 'https://dl.cloudsmith.io/public/caddy/testing/debian.deb.txt' \ - | tee /etc/apt/sources.list.d/caddy-testing.list \ - && apt-get update \ - && apt-get install -y caddy - -COPY conduit-example.toml conduit.toml -COPY complement/caddy.json caddy.json - -ENV SERVER_NAME=localhost -ENV CONDUIT_CONFIG=/workdir/conduit.toml - -RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml -RUN echo "log = \"warn,_=off,sled=off\"" >> conduit.toml -RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml - -EXPOSE 8008 8448 - -CMD uname -a && \ - sed -i "s/#server_name = \"your.server.name\"/server_name = \"${SERVER_NAME}\"/g" conduit.toml && \ - sed -i "s/your.server.name/${SERVER_NAME}/g" caddy.json && \ - caddy start --config caddy.json > /dev/null && \ - /workdir/conduit diff --git a/complement/README.md b/complement/README.md deleted file mode 100644 index 185b251a..00000000 --- a/complement/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# Complement - -## What's that? - -Have a look at [its repository](https://github.com/matrix-org/complement). - -## How do I use it with Conduit? - -The script at [`../bin/complement`](../bin/complement) has automation for this. -It takes a few command line arguments, you can read the script to find out what -those are. 
diff --git a/complement/caddy.json b/complement/caddy.json deleted file mode 100644 index ea52c2c9..00000000 --- a/complement/caddy.json +++ /dev/null @@ -1,72 +0,0 @@ -{ - "logging": { - "logs": { - "default": { - "level": "WARN" - } - } - }, - "apps": { - "http": { - "https_port": 8448, - "servers": { - "srv0": { - "listen": [":8448"], - "routes": [{ - "match": [{ - "host": ["your.server.name"] - }], - "handle": [{ - "handler": "subroute", - "routes": [{ - "handle": [{ - "handler": "reverse_proxy", - "upstreams": [{ - "dial": "127.0.0.1:8008" - }] - }] - }] - }], - "terminal": true - }], - "tls_connection_policies": [{ - "match": { - "sni": ["your.server.name"] - } - }] - } - } - }, - "pki": { - "certificate_authorities": { - "local": { - "name": "Complement CA", - "root": { - "certificate": "/complement/ca/ca.crt", - "private_key": "/complement/ca/ca.key" - }, - "intermediate": { - "certificate": "/complement/ca/ca.crt", - "private_key": "/complement/ca/ca.key" - } - } - } - }, - "tls": { - "automation": { - "policies": [{ - "subjects": ["your.server.name"], - "issuers": [{ - "module": "internal" - }], - "on_demand": true - }, { - "issuers": [{ - "module": "internal", - "ca": "local" - }] - }] - } - } - } -} \ No newline at end of file diff --git a/conduit-example.toml b/conduit-example.toml deleted file mode 100644 index c83bce74..00000000 --- a/conduit-example.toml +++ /dev/null @@ -1,67 +0,0 @@ -# ============================================================================= -# This is the official example config for Conduit. -# If you use it for your server, you will need to adjust it to your own needs. -# At the very least, change the server_name field! -# ============================================================================= - - -[global] -# The server_name is the pretty name of this server. It is used as a suffix for user -# and room ids. 
Examples: matrix.org, conduit.rs - -# The Conduit server needs all /_matrix/ requests to be reachable at -# https://your.server.name/ on port 443 (client-server) and 8448 (federation). - -# If that's not possible for you, you can create /.well-known files to redirect -# requests. See -# https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client -# and -# https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server -# for more information - -# YOU NEED TO EDIT THIS -#server_name = "your.server.name" - -# This is the only directory where Conduit will save its data -database_path = "/var/lib/matrix-conduit/" -database_backend = "rocksdb" - -# The port Conduit will be running on. You need to set up a reverse proxy in -# your web server (e.g. apache or nginx), so all requests to /_matrix on port -# 443 and 8448 will be forwarded to the Conduit instance running on this port -# Docker users: Don't change this, you'll need to map an external port to this. -port = 6167 - -# Max size for uploads -max_request_size = 20_000_000 # in bytes - -# Enables registration. If set to false, no users can register on this server. -allow_registration = true - -# A static registration token that new users will have to provide when creating -# an account. YOU NEED TO EDIT THIS. -# - Insert a password that users will have to enter on registration -# - Start the line with '#' to remove the condition -registration_token = "" - -allow_federation = true -allow_check_for_updates = true - -# Enable the display name lightning bolt on registration. -enable_lightning_bolt = true - -# Servers listed here will be used to gather public keys of other servers. -# Generally, copying this exactly should be enough. (Currently, Conduit doesn't -# support batched key requests, so this list should only contain Synapse -# servers.) 
-trusted_servers = ["matrix.org"] - -#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time - -# Controls the log verbosity. See also [here][0]. -# -# [0]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives -#log = "..." - -address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy -#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it. diff --git a/debian/README.md b/debian/README.md deleted file mode 100644 index 4ddb614d..00000000 --- a/debian/README.md +++ /dev/null @@ -1,37 +0,0 @@ -Conduit for Debian -================== - -Installation ------------- - -Information about downloading, building and deploying the Debian package, see -the "Installing Conduit" section in the Deploying docs. -All following sections until "Setting up the Reverse Proxy" be ignored because -this is handled automatically by the packaging. - -Configuration -------------- - -When installed, Debconf generates the configuration of the homeserver -(host)name, the address and port it listens on. This configuration ends up in -`/etc/matrix-conduit/conduit.toml`. - -You can tweak more detailed settings by uncommenting and setting the variables -in `/etc/matrix-conduit/conduit.toml`. This involves settings such as the maximum -file size for download/upload, enabling federation, etc. - -Running -------- - -The package uses the `matrix-conduit.service` systemd unit file to start and -stop Conduit. It loads the configuration file mentioned above to set up the -environment before running the server. - -This package assumes by default that Conduit will be placed behind a reverse -proxy such as Apache or nginx. This default deployment entails just listening -on `127.0.0.1` and the free port `6167` and is reachable via a client using the URL -. 
- -At a later stage this packaging may support also setting up TLS and running -stand-alone. In this case, however, you need to set up some certificates and -renewal, for it to work properly. diff --git a/debian/config b/debian/config deleted file mode 100644 index 8710ef97..00000000 --- a/debian/config +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh -set -e - -# Source debconf library. -. /usr/share/debconf/confmodule - -# Ask for the Matrix homeserver name, address and port. -db_input high matrix-conduit/hostname || true -db_go - -db_input low matrix-conduit/address || true -db_go - -db_input medium matrix-conduit/port || true -db_go - -exit 0 diff --git a/debian/matrix-conduit.service b/debian/matrix-conduit.service deleted file mode 100644 index 299f2680..00000000 --- a/debian/matrix-conduit.service +++ /dev/null @@ -1,47 +0,0 @@ -[Unit] -Description=Conduit Matrix homeserver -After=network.target - -[Service] -DynamicUser=yes -User=_matrix-conduit -Group=_matrix-conduit -Type=simple - -AmbientCapabilities= -CapabilityBoundingSet= -LockPersonality=yes -MemoryDenyWriteExecute=yes -NoNewPrivileges=yes -ProtectClock=yes -ProtectControlGroups=yes -ProtectHome=yes -ProtectHostname=yes -ProtectKernelLogs=yes -ProtectKernelModules=yes -ProtectKernelTunables=yes -ProtectSystem=strict -PrivateDevices=yes -PrivateMounts=yes -PrivateTmp=yes -PrivateUsers=yes -RemoveIPC=yes -RestrictAddressFamilies=AF_INET AF_INET6 -RestrictNamespaces=yes -RestrictRealtime=yes -RestrictSUIDSGID=yes -SystemCallArchitectures=native -SystemCallFilter=@system-service -SystemCallErrorNumber=EPERM -StateDirectory=matrix-conduit - -Environment="CONDUIT_CONFIG=/etc/matrix-conduit/conduit.toml" - -ExecStart=/usr/sbin/matrix-conduit -Restart=on-failure -RestartSec=10 -StartLimitInterval=1m -StartLimitBurst=5 - -[Install] -WantedBy=multi-user.target diff --git a/debian/postinst b/debian/postinst deleted file mode 100644 index 6361af5a..00000000 --- a/debian/postinst +++ /dev/null @@ -1,104 +0,0 @@ 
-#!/bin/sh -set -e - -. /usr/share/debconf/confmodule - -CONDUIT_CONFIG_PATH=/etc/matrix-conduit -CONDUIT_CONFIG_FILE="${CONDUIT_CONFIG_PATH}/conduit.toml" -CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit/ - -case "$1" in - configure) - # Create the `_matrix-conduit` user if it does not exist yet. - if ! getent passwd _matrix-conduit > /dev/null ; then - echo 'Adding system user for the Conduit Matrix homeserver' 1>&2 - adduser --system --group --quiet \ - --home "$CONDUIT_DATABASE_PATH" \ - --disabled-login \ - --force-badname \ - _matrix-conduit - fi - - # Create the database path if it does not exist yet and fix up ownership - # and permissions. - mkdir -p "$CONDUIT_DATABASE_PATH" - chown _matrix-conduit "$CONDUIT_DATABASE_PATH" - chmod 700 "$CONDUIT_DATABASE_PATH" - - if [ ! -e "$CONDUIT_CONFIG_FILE" ]; then - # Write the debconf values in the config. - db_get matrix-conduit/hostname - CONDUIT_SERVER_NAME="$RET" - db_get matrix-conduit/address - CONDUIT_ADDRESS="$RET" - db_get matrix-conduit/port - CONDUIT_PORT="$RET" - mkdir -p "$CONDUIT_CONFIG_PATH" - cat > "$CONDUIT_CONFIG_FILE" << EOF -[global] -# The server_name is the pretty name of this server. It is used as a suffix for -# user and room ids. Examples: matrix.org, conduit.rs - -# The Conduit server needs all /_matrix/ requests to be reachable at -# https://your.server.name/ on port 443 (client-server) and 8448 (federation). - -# If that's not possible for you, you can create /.well-known files to redirect -# requests. See -# https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client -# and -# https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server -# for more information - -server_name = "${CONDUIT_SERVER_NAME}" - -# This is the only directory where Conduit will save its data. -database_path = "${CONDUIT_DATABASE_PATH}" -database_backend = "rocksdb" - -# The address Conduit will be listening on. -# By default the server listens on address 0.0.0.0. 
Change this to 127.0.0.1 to -# only listen on the localhost when using a reverse proxy. -address = "${CONDUIT_ADDRESS}" - -# The port Conduit will be running on. You need to set up a reverse proxy in -# your web server (e.g. apache or nginx), so all requests to /_matrix on port -# 443 and 8448 will be forwarded to the Conduit instance running on this port -# Docker users: Don't change this, you'll need to map an external port to this. -port = ${CONDUIT_PORT} - -# Max size for uploads -max_request_size = 20_000_000 # in bytes - -# Enables registration. If set to false, no users can register on this server. -allow_registration = true - -# A static registration token that new users will have to provide when creating -# an account. -# - Insert a password that users will have to enter on registration -# - Start the line with '#' to remove the condition -#registration_token = "" - -allow_federation = true -allow_check_for_updates = true - -# Enable the display name lightning bolt on registration. -enable_lightning_bolt = true - -# Servers listed here will be used to gather public keys of other servers. -# Generally, copying this exactly should be enough. (Currently, Conduit doesn't -# support batched key requests, so this list should only contain Synapse -# servers.) -trusted_servers = ["matrix.org"] - -#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time - -# Controls the log verbosity. See also [here][0]. -# -# [0]: https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives -#log = "..." -EOF - fi - ;; -esac - -#DEBHELPER# diff --git a/debian/postrm b/debian/postrm deleted file mode 100644 index 28949091..00000000 --- a/debian/postrm +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/sh -set -e - -. 
/usr/share/debconf/confmodule - -CONDUIT_CONFIG_PATH=/etc/matrix-conduit -CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit - -case $1 in - purge) - # Remove debconf changes from the db - db_purge - - # Per https://www.debian.org/doc/debian-policy/ch-files.html#behavior - # "configuration files must be preserved when the package is removed, and - # only deleted when the package is purged." - if [ -d "$CONDUIT_CONFIG_PATH" ]; then - rm -r "$CONDUIT_CONFIG_PATH" - fi - - if [ -d "$CONDUIT_DATABASE_PATH" ]; then - rm -r "$CONDUIT_DATABASE_PATH" - fi - ;; -esac - -#DEBHELPER# diff --git a/debian/templates b/debian/templates deleted file mode 100644 index c4281ad3..00000000 --- a/debian/templates +++ /dev/null @@ -1,21 +0,0 @@ -Template: matrix-conduit/hostname -Type: string -Default: localhost -Description: The server (host)name of the Matrix homeserver - This is the hostname the homeserver will be reachable at via a client. - . - If set to "localhost", you can connect with a client locally and clients - from other hosts and also other homeservers will not be able to reach you! - -Template: matrix-conduit/address -Type: string -Default: 127.0.0.1 -Description: The listen address of the Matrix homeserver - This is the address the homeserver will listen on. Leave it set to 127.0.0.1 - when using a reverse proxy. - -Template: matrix-conduit/port -Type: string -Default: 6167 -Description: The port of the Matrix homeserver - This port is most often just accessed by a reverse proxy. diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile deleted file mode 100644 index 4c1199ed..00000000 --- a/docker/ci-binaries-packaging.Dockerfile +++ /dev/null @@ -1,84 +0,0 @@ -# syntax=docker/dockerfile:1 -# --------------------------------------------------------------------------------------------------------- -# This Dockerfile is intended to be built as part of Conduit's CI pipeline. 
-# It does not build Conduit in Docker, but just copies the matching build artifact from the build jobs. -# -# It is mostly based on the normal Conduit Dockerfile, but adjusted in a few places to maximise caching. -# Credit's for the original Dockerfile: Weasy666. -# --------------------------------------------------------------------------------------------------------- - -FROM docker.io/alpine:3.16.0@sha256:4ff3ca91275773af45cb4b0834e12b7eb47d1c18f770a0b151381cd227f4c253 AS runner - - -# Standard port on which Conduit launches. -# You still need to map the port when using the docker command or docker-compose. -EXPOSE 6167 - -# Users are expected to mount a volume to this directory: -ARG DEFAULT_DB_PATH=/var/lib/matrix-conduit - -ENV CONDUIT_PORT=6167 \ - CONDUIT_ADDRESS="0.0.0.0" \ - CONDUIT_DATABASE_PATH=${DEFAULT_DB_PATH} \ - CONDUIT_CONFIG='' -# └─> Set no config file to do all configuration with env vars - -# Conduit needs: -# ca-certificates: for https -# iproute2: for `ss` for the healthcheck script -RUN apk add --no-cache \ - ca-certificates \ - iproute2 - -ARG CREATED -ARG VERSION -ARG GIT_REF -# Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md -# including a custom label specifying the build command -LABEL org.opencontainers.image.created=${CREATED} \ - org.opencontainers.image.authors="Conduit Contributors" \ - org.opencontainers.image.title="Conduit" \ - org.opencontainers.image.version=${VERSION} \ - org.opencontainers.image.vendor="Conduit Contributors" \ - org.opencontainers.image.description="A Matrix homeserver written in Rust" \ - org.opencontainers.image.url="https://conduit.rs/" \ - org.opencontainers.image.revision=${GIT_REF} \ - org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \ - org.opencontainers.image.licenses="Apache-2.0" \ - org.opencontainers.image.documentation="https://gitlab.com/famedly/conduit" \ - org.opencontainers.image.ref.name="" - - -# Test if Conduit is still 
alive, uses the same endpoint as Element -COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh -HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh - -# Improve security: Don't run stuff as root, that does not need to run as root: -# Most distros also use 1000:1000 for the first real user, so this should resolve volume mounting problems. -ARG USER_ID=1000 -ARG GROUP_ID=1000 -RUN set -x ; \ - deluser --remove-home www-data ; \ - addgroup -S -g ${GROUP_ID} conduit 2>/dev/null ; \ - adduser -S -u ${USER_ID} -D -H -h /srv/conduit -G conduit -g conduit conduit 2>/dev/null ; \ - addgroup conduit conduit 2>/dev/null && exit 0 ; exit 1 - -# Change ownership of Conduit files to conduit user and group -RUN chown -cR conduit:conduit /srv/conduit && \ - chmod +x /srv/conduit/healthcheck.sh && \ - mkdir -p ${DEFAULT_DB_PATH} && \ - chown -cR conduit:conduit ${DEFAULT_DB_PATH} - -# Change user to conduit -USER conduit -# Set container home directory -WORKDIR /srv/conduit - -# Run Conduit and print backtraces on panics -ENV RUST_BACKTRACE=1 -ENTRYPOINT [ "/srv/conduit/conduit" ] - -# Depending on the target platform (e.g. 
"linux/arm/v7", "linux/arm64/v8", or "linux/amd64") -# copy the matching binary into this docker image -ARG TARGETPLATFORM -COPY --chown=conduit:conduit ./$TARGETPLATFORM /srv/conduit/conduit diff --git a/docker/healthcheck.sh b/docker/healthcheck.sh deleted file mode 100644 index 62f2f987..00000000 --- a/docker/healthcheck.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/sh - -# If the config file does not contain a default port and the CONDUIT_PORT env is not set, create -# try to get port from process list -if [ -z "${CONDUIT_PORT}" ]; then - CONDUIT_PORT=$(ss -tlpn | grep conduit | grep -m1 -o ':[0-9]*' | grep -m1 -o '[0-9]*') -fi - -# If CONDUIT_ADDRESS is not set try to get the address from the process list -if [ -z "${CONDUIT_ADDRESS}" ]; then - CONDUIT_ADDRESS=$(ss -tlpn | awk -F ' +|:' '/conduit/ { print $4 }') -fi - -# The actual health check. -# We try to first get a response on HTTP and when that fails on HTTPS and when that fails, we exit with code 1. -# TODO: Change this to a single wget call. Do we have a config value that we can check for that? 
-wget --no-verbose --tries=1 --spider "http://${CONDUIT_ADDRESS}:${CONDUIT_PORT}/_matrix/client/versions" || \ - wget --no-verbose --tries=1 --spider "https://${CONDUIT_ADDRESS}:${CONDUIT_PORT}/_matrix/client/versions" || \ - exit 1 diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md deleted file mode 100644 index 2caca3ee..00000000 --- a/docs/SUMMARY.md +++ /dev/null @@ -1,12 +0,0 @@ -# Summary - -- [Introduction](introduction.md) - -- [Configuration](configuration.md) -- [Deploying](deploying.md) - - [Generic](deploying/generic.md) - - [Debian](deploying/debian.md) - - [Docker](deploying/docker.md) - - [NixOS](deploying/nixos.md) -- [TURN](turn.md) -- [Appservices](appservices.md) diff --git a/docs/appservices.md b/docs/appservices.md deleted file mode 100644 index 8ca015a0..00000000 --- a/docs/appservices.md +++ /dev/null @@ -1,61 +0,0 @@ -# Setting up Appservices - -## Getting help - -If you run into any problems while setting up an Appservice, write an email to `timo@koesters.xyz`, ask us in [#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org) or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new). - -## Set up the appservice - general instructions - -Follow whatever instructions are given by the appservice. This usually includes -downloading, changing its config (setting domain, homeserver url, port etc.) -and later starting it. - -At some point the appservice guide should ask you to add a registration yaml -file to the homeserver. In Synapse you would do this by adding the path to the -homeserver.yaml, but in Conduit you can do this from within Matrix: - -First, go into the #admins room of your homeserver. The first person that -registered on the homeserver automatically joins it. 
Then send a message into -the room like this: - - @conduit:your.server.name: register-appservice - ``` - paste - the - contents - of - the - yaml - registration - here - ``` - -You can confirm it worked by sending a message like this: -`@conduit:your.server.name: list-appservices` - -The @conduit bot should answer with `Appservices (1): your-bridge` - -Then you are done. Conduit will send messages to the appservices and the -appservice can send requests to the homeserver. You don't need to restart -Conduit, but if it doesn't work, restarting while the appservice is running -could help. - -## Appservice-specific instructions - -### Remove an appservice - -To remove an appservice go to your admin room and execute - -`@conduit:your.server.name: unregister-appservice ` - -where `` one of the output of `list-appservices`. - -### Tested appservices - -These appservices have been tested and work with Conduit without any extra steps: - -- [matrix-appservice-discord](https://github.com/Half-Shot/matrix-appservice-discord) -- [mautrix-hangouts](https://github.com/mautrix/hangouts/) -- [mautrix-telegram](https://github.com/mautrix/telegram/) -- [mautrix-signal](https://github.com/mautrix/signal/) from version `0.2.2` forward. -- [heisenbridge](https://github.com/hifi/heisenbridge/) diff --git a/docs/configuration.md b/docs/configuration.md deleted file mode 100644 index efa080dc..00000000 --- a/docs/configuration.md +++ /dev/null @@ -1,110 +0,0 @@ -# Configuration - -**Conduit** is configured using a TOML file. The configuration file is loaded from the path specified by the `CONDUIT_CONFIG` environment variable. - -> **Note:** The configuration file is required to run Conduit. If the `CONDUIT_CONFIG` environment variable is not set, Conduit will exit with an error. 
- -> **Note:** If you update the configuration file, you must restart Conduit for the changes to take effect - -Conduit's configuration file is divided into the following sections: - -- [Global](#global) - - [TLS](#tls) - - [Proxy](#proxy) - - -## Global - -The `global` section contains the following fields: - -> **Note:** The `*` symbol indicates that the field is required, and the values in **parentheses** are the possible values - -| Field | Type | Description | Default | -| --- | --- | --- | --- | -| `address` | `string` | The address to bind to | `"127.0.0.1"` | -| `port` | `integer` | The port to bind to | `8000` | -| `tls` | `table` | See the [TLS configuration](#tls) | N/A | -| `server_name`_*_ | `string` | The server name | N/A | -| `database_backend`_*_ | `string` | The database backend to use (`"rocksdb"` *recommended*, `"sqlite"`) | N/A | -| `database_path`_*_ | `string` | The path to the database file/dir | N/A | -| `db_cache_capacity_mb` | `float` | The cache capacity, in MB | `300.0` | -| `enable_lightning_bolt` | `boolean` | Add `⚡️` emoji to end of user's display name | `true` | -| `allow_check_for_updates` | `boolean` | Allow Conduit to check for updates | `true` | -| `conduit_cache_capacity_modifier` | `float` | The value to multiply the default cache capacity by | `1.0` | -| `rocksdb_max_open_files` | `integer` | The maximum number of open files | `1000` | -| `pdu_cache_capacity` | `integer` | The maximum number of Persisted Data Units (PDUs) to cache | `150000` | -| `cleanup_second_interval` | `integer` | How often conduit should clean up the database, in seconds | `60` | -| `max_request_size` | `integer` | The maximum request size, in bytes | `20971520` (20 MiB) | -| `max_concurrent_requests` | `integer` | The maximum number of concurrent requests | `100` | -| `max_fetch_prev_events` | `integer` | The maximum number of previous events to fetch per request if conduit notices events are missing | `100` | -| `allow_registration` | `boolean` | 
Opens your homeserver to public registration | `false` | -| `registration_token` | `string` | The token users need to have when registering to your homeserver | N/A | -| `allow_encryption` | `boolean` | Allow users to enable encryption in their rooms | `true` | -| `allow_federation` | `boolean` | Allow federation with other servers | `true` | -| `allow_room_creation` | `boolean` | Allow users to create rooms | `true` | -| `allow_unstable_room_versions` | `boolean` | Allow users to create and join rooms with unstable versions | `true` | -| `default_room_version` | `string` | The default room version (`"6"`-`"10"`)| `"10"` | -| `allow_jaeger` | `boolean` | Allow Jaeger tracing | `false` | -| `tracing_flame` | `boolean` | Enable flame tracing | `false` | -| `proxy` | `table` | See the [Proxy configuration](#proxy) | N/A | -| `jwt_secret` | `string` | The secret used in the JWT to enable JWT login without it a 400 error will be returned | N/A | -| `trusted_servers` | `array` | The list of trusted servers to gather public keys of offline servers | `["matrix.org"]` | -| `log` | `string` | The log verbosity to use | `"warn"` | -| `turn_username` | `string` | The TURN username | `""` | -| `turn_password` | `string` | The TURN password | `""` | -| `turn_uris` | `array` | The TURN URIs | `[]` | -| `turn_secret` | `string` | The TURN secret | `""` | -| `turn_ttl` | `integer` | The TURN TTL in seconds | `86400` | -| `emergency_password` | `string` | Set a password to login as the `conduit` user in case of emergency | N/A | - - -### TLS -The `tls` table contains the following fields: -- `certs`: The path to the public PEM certificate -- `key`: The path to the PEM private key - -#### Example -```toml -[global.tls] -certs = "/path/to/cert.pem" -key = "/path/to/key.pem" -``` - - -### Proxy -You can choose what requests conduit should proxy (if any). The `proxy` table contains the following fields - -#### Global -The global option will proxy all outgoing requests. 
The `global` table contains the following fields: -- `url`: The URL of the proxy server -##### Example -```toml -[global.proxy.global] -url = "https://example.com" -``` - -#### By domain -An array of tables that contain the following fields: -- `url`: The URL of the proxy server -- `include`: Domains that should be proxied (assumed to be `["*"]` if unset) -- `exclude`: Domains that should not be proxied (takes precedent over `include`) - -Both `include` and `exclude` allow for glob pattern matching. -##### Example -In this example, all requests to domains ending in `.onion` and `matrix.secretly-an-onion-domain.xyz` -will be proxied via `socks://localhost:9050`, except for domains ending in `.myspecial.onion`. You can add as many `by_domain` tables as you need. -```toml -[[global.proxy.by_domain]] -url = "socks5://localhost:9050" -include = ["*.onion", "matrix.secretly-an-onion-domain.xyz"] -exclude = ["*.clearnet.onion"] -``` - -### Example - -> **Note:** The following example is a minimal configuration file. You should replace the values with your own. - -```toml -[global] -{{#include ../conduit-example.toml:22:}} -``` diff --git a/docs/deploying.md b/docs/deploying.md deleted file mode 100644 index 136e6538..00000000 --- a/docs/deploying.md +++ /dev/null @@ -1,3 +0,0 @@ -# Deploying - -This chapter describes various ways to deploy Conduit. 
diff --git a/docs/deploying/debian.md b/docs/deploying/debian.md deleted file mode 100644 index 2e8a544a..00000000 --- a/docs/deploying/debian.md +++ /dev/null @@ -1 +0,0 @@ -{{#include ../../debian/README.md}} diff --git a/docs/deploying/docker-compose.for-traefik.yml b/docs/deploying/docker-compose.for-traefik.yml deleted file mode 100644 index c0bb042e..00000000 --- a/docs/deploying/docker-compose.for-traefik.yml +++ /dev/null @@ -1,69 +0,0 @@ -# Conduit - Behind Traefik Reverse Proxy -version: '3' - -services: - homeserver: - ### If you already built the Conduit image with 'docker build' or want to use the Docker Hub image, - ### then you are ready to go. - image: matrixconduit/matrix-conduit:latest - ### If you want to build a fresh image from the sources, then comment the image line and uncomment the - ### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this: - ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d - # build: - # context: . - # args: - # CREATED: '2021-03-16T08:18:27Z' - # VERSION: '0.1.0' - # LOCAL: 'false' - # GIT_REF: origin/master - restart: unless-stopped - volumes: - - db:/var/lib/matrix-conduit/ - networks: - - proxy - environment: - CONDUIT_SERVER_NAME: your.server.name # EDIT THIS - CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/ - CONDUIT_DATABASE_BACKEND: rocksdb - CONDUIT_PORT: 6167 - CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB - CONDUIT_ALLOW_REGISTRATION: 'true' - #CONDUIT_REGISTRATION_TOKEN: '' # require password for registration - CONDUIT_ALLOW_FEDERATION: 'true' - CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true' - CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' - #CONDUIT_MAX_CONCURRENT_REQUESTS: 100 - CONDUIT_ADDRESS: 0.0.0.0 - CONDUIT_CONFIG: '' # Ignore this - - # We need some way to server the client and server .well-known json. 
The simplest way is to use a nginx container - # to serve those two as static files. If you want to use a different way, delete or comment the below service, here - # and in the docker-compose override file. - well-known: - image: nginx:latest - restart: unless-stopped - volumes: - - ./nginx/matrix.conf:/etc/nginx/conf.d/matrix.conf # the config to serve the .well-known/matrix files - - ./nginx/www:/var/www/ # location of the client and server .well-known-files - ### Uncomment if you want to use your own Element-Web App. - ### Note: You need to provide a config.json for Element and you also need a second - ### Domain or Subdomain for the communication between Element and Conduit - ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md - # element-web: - # image: vectorim/element-web:latest - # restart: unless-stopped - # volumes: - # - ./element_config.json:/app/config.json - # networks: - # - proxy - # depends_on: - # - homeserver - -volumes: - db: - -networks: - # This is the network Traefik listens to, if your network has a different - # name, don't forget to change it here and in the docker-compose.override.yml - proxy: - external: true diff --git a/docs/deploying/docker-compose.override.yml b/docs/deploying/docker-compose.override.yml deleted file mode 100644 index 9525078d..00000000 --- a/docs/deploying/docker-compose.override.yml +++ /dev/null @@ -1,45 +0,0 @@ -# Conduit - Traefik Reverse Proxy Labels -version: '3' - -services: - homeserver: - labels: - - "traefik.enable=true" - - "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network - - - "traefik.http.routers.to-conduit.rule=Host(`.`)" # Change to the address on which Conduit is hosted - - "traefik.http.routers.to-conduit.tls=true" - - "traefik.http.routers.to-conduit.tls.certresolver=letsencrypt" - - "traefik.http.routers.to-conduit.middlewares=cors-headers@docker" - - - 
"traefik.http.middlewares.cors-headers.headers.accessControlAllowOriginList=*" - - "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization" - - "traefik.http.middlewares.cors-headers.headers.accessControlAllowMethods=GET, POST, PUT, DELETE, OPTIONS" - - # We need some way to server the client and server .well-known json. The simplest way is to use a nginx container - # to serve those two as static files. If you want to use a different way, delete or comment the below service, here - # and in the docker-compose file. - well-known: - labels: - - "traefik.enable=true" - - "traefik.docker.network=proxy" - - - "traefik.http.routers.to-matrix-wellknown.rule=Host(`.`) && PathPrefix(`/.well-known/matrix`)" - - "traefik.http.routers.to-matrix-wellknown.tls=true" - - "traefik.http.routers.to-matrix-wellknown.tls.certresolver=letsencrypt" - - "traefik.http.routers.to-matrix-wellknown.middlewares=cors-headers@docker" - - - "traefik.http.middlewares.cors-headers.headers.accessControlAllowOriginList=*" - - "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization" - - "traefik.http.middlewares.cors-headers.headers.accessControlAllowMethods=GET, POST, PUT, DELETE, OPTIONS" - - - ### Uncomment this if you uncommented Element-Web App in the docker-compose.yml - # element-web: - # labels: - # - "traefik.enable=true" - # - "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network - - # - "traefik.http.routers.to-element-web.rule=Host(`.`)" # Change to the address on which Element-Web is hosted - # - "traefik.http.routers.to-element-web.tls=true" - # - "traefik.http.routers.to-element-web.tls.certresolver=letsencrypt" diff --git a/docs/deploying/docker-compose.with-traefik.yml b/docs/deploying/docker-compose.with-traefik.yml deleted file mode 100644 index 8ce3ad46..00000000 --- 
a/docs/deploying/docker-compose.with-traefik.yml +++ /dev/null @@ -1,96 +0,0 @@ -# Conduit - Behind Traefik Reverse Proxy -version: '3' - -services: - homeserver: - ### If you already built the Conduit image with 'docker build' or want to use the Docker Hub image, - ### then you are ready to go. - image: matrixconduit/matrix-conduit:latest - ### If you want to build a fresh image from the sources, then comment the image line and uncomment the - ### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this: - ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d - # build: - # context: . - # args: - # CREATED: '2021-03-16T08:18:27Z' - # VERSION: '0.1.0' - # LOCAL: 'false' - # GIT_REF: origin/master - restart: unless-stopped - volumes: - - db:/srv/conduit/.local/share/conduit - ### Uncomment if you want to use conduit.toml to configure Conduit - ### Note: Set env vars will override conduit.toml values - # - ./conduit.toml:/srv/conduit/conduit.toml - networks: - - proxy - environment: - CONDUIT_SERVER_NAME: localhost:6167 # replace with your own name - CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' - CONDUIT_ALLOW_REGISTRATION : 'true' - ### Uncomment and change values as desired - # CONDUIT_ADDRESS: 0.0.0.0 - # CONDUIT_PORT: 6167 - # CONDUIT_REGISTRATION_TOKEN: '' # require password for registration - # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string '' - # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging - # CONDUIT_ALLOW_ENCRYPTION: 'true' - # CONDUIT_ALLOW_FEDERATION: 'true' - # CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true' - # CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit - # CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB - - # We need some way to server the client and server .well-known 
json. The simplest way is to use a nginx container - # to serve those two as static files. If you want to use a different way, delete or comment the below service, here - # and in the docker-compose override file. - well-known: - image: nginx:latest - restart: unless-stopped - volumes: - - ./nginx/matrix.conf:/etc/nginx/conf.d/matrix.conf # the config to serve the .well-known/matrix files - - ./nginx/www:/var/www/ # location of the client and server .well-known-files - - ### Uncomment if you want to use your own Element-Web App. - ### Note: You need to provide a config.json for Element and you also need a second - ### Domain or Subdomain for the communication between Element and Conduit - ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md - # element-web: - # image: vectorim/element-web:latest - # restart: unless-stopped - # volumes: - # - ./element_config.json:/app/config.json - # networks: - # - proxy - # depends_on: - # - homeserver - - traefik: - image: "traefik:latest" - container_name: "traefik" - restart: "unless-stopped" - ports: - - "80:80" - - "443:443" - volumes: - - "/var/run/docker.sock:/var/run/docker.sock" - # - "./traefik_config:/etc/traefik" - - "acme:/etc/traefik/acme" - labels: - - "traefik.enable=true" - - # middleware redirect - - "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https" - # global redirect to https - - "traefik.http.routers.redirs.rule=hostregexp(`{host:.+}`)" - - "traefik.http.routers.redirs.entrypoints=http" - - "traefik.http.routers.redirs.middlewares=redirect-to-https" - - networks: - - proxy - -volumes: - db: - acme: - -networks: - proxy: diff --git a/docs/deploying/docker-compose.yml b/docs/deploying/docker-compose.yml deleted file mode 100644 index 97f91daf..00000000 --- a/docs/deploying/docker-compose.yml +++ /dev/null @@ -1,53 +0,0 @@ -# Conduit -version: '3' - -services: - homeserver: - ### If you already built the Conduit image with 'docker build' or want to use a 
registry image, - ### then you are ready to go. - image: matrixconduit/matrix-conduit:latest - ### If you want to build a fresh image from the sources, then comment the image line and uncomment the - ### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this: - ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d - # build: - # context: . - # args: - # CREATED: '2021-03-16T08:18:27Z' - # VERSION: '0.1.0' - # LOCAL: 'false' - # GIT_REF: origin/master - restart: unless-stopped - ports: - - 8448:6167 - volumes: - - db:/var/lib/matrix-conduit/ - environment: - CONDUIT_SERVER_NAME: your.server.name # EDIT THIS - CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/ - CONDUIT_DATABASE_BACKEND: rocksdb - CONDUIT_PORT: 6167 - CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB - CONDUIT_ALLOW_REGISTRATION: 'true' - CONDUIT_ALLOW_FEDERATION: 'true' - CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true' - CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' - #CONDUIT_MAX_CONCURRENT_REQUESTS: 100 - CONDUIT_ADDRESS: 0.0.0.0 - CONDUIT_CONFIG: '' # Ignore this - # - ### Uncomment if you want to use your own Element-Web App. 
- ### Note: You need to provide a config.json for Element and you also need a second - ### Domain or Subdomain for the communication between Element and Conduit - ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md - # element-web: - # image: vectorim/element-web:latest - # restart: unless-stopped - # ports: - # - 8009:80 - # volumes: - # - ./element_config.json:/app/config.json - # depends_on: - # - homeserver - -volumes: - db: diff --git a/docs/deploying/docker.md b/docs/deploying/docker.md deleted file mode 100644 index c19ef51b..00000000 --- a/docs/deploying/docker.md +++ /dev/null @@ -1,216 +0,0 @@ -# Conduit for Docker - -> **Note:** To run and use Conduit you should probably use it with a Domain or Subdomain behind a reverse proxy (like Nginx, Traefik, Apache, ...) with a Lets Encrypt certificate. - -## Docker - -To run Conduit with Docker you can either build the image yourself or pull it from a registry. - - -### Use a registry - -OCI images for Conduit are available in the registries listed below. We recommend using the image tagged as `latest` from GitLab's own registry. - -| Registry | Image | Size | Notes | -| --------------- | --------------------------------------------------------------- | ----------------------------- | ---------------------- | -| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:latest][gl] | ![Image Size][shield-latest] | Stable image. | -| Docker Hub | [docker.io/matrixconduit/matrix-conduit:latest][dh] | ![Image Size][shield-latest] | Stable image. | -| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:next][gl] | ![Image Size][shield-next] | Development version. | -| Docker Hub | [docker.io/matrixconduit/matrix-conduit:next][dh] | ![Image Size][shield-next] | Development version. 
| - - -[dh]: https://hub.docker.com/r/matrixconduit/matrix-conduit -[gl]: https://gitlab.com/famedly/conduit/container_registry/2497937 -[shield-latest]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest -[shield-next]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/next - - -Use -```bash -docker image pull -``` -to pull it to your machine. - - - -### Build using a dockerfile - -The Dockerfile provided by Conduit has two stages, each of which creates an image. - -1. **Builder:** Builds the binary from local context or by cloning a git revision from the official repository. -2. **Runner:** Copies the built binary from **Builder** and sets up the runtime environment, like creating a volume to persist the database and applying the correct permissions. - -To build the image you can use the following command - -```bash -docker build --tag matrixconduit/matrix-conduit:latest . -``` - -which also will tag the resulting image as `matrixconduit/matrix-conduit:latest`. - - - -### Run - -When you have the image you can simply run it with - -```bash -docker run -d -p 8448:6167 \ - -v db:/var/lib/matrix-conduit/ \ - -e CONDUIT_SERVER_NAME="your.server.name" \ - -e CONDUIT_DATABASE_BACKEND="rocksdb" \ - -e CONDUIT_ALLOW_REGISTRATION=true \ - -e CONDUIT_ALLOW_FEDERATION=true \ - -e CONDUIT_MAX_REQUEST_SIZE="20_000_000" \ - -e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \ - -e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \ - --name conduit -``` - -or you can use [docker-compose](#docker-compose). - -The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../configuration.md). -You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need -to pass `-e CONDUIT_CONFIG=""` into your container. 
For an overview of possible values, please take a look at the `docker-compose.yml` file. - -If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it. - -### Docker-compose - -If the `docker run` command is not for you or your setup, you can also use one of the provided `docker-compose` files. - -Depending on your proxy setup, you can use one of the following files; -- If you already have a `traefik` instance set up, use [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) -- If you don't have a `traefik` instance set up (or any other reverse proxy), use [`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml) -- For any other reverse proxy, use [`docker-compose.yml`](docker-compose.yml) - -When picking the traefik-related compose file, rename it so it matches `docker-compose.yml`, and -rename the override file to `docker-compose.override.yml`. Edit the latter with the values you want -for your server. -Additional info about deploying Conduit can be found [here](generic.md). - -### Build - -To build the Conduit image with docker-compose, you first need to open and modify the `docker-compose.yml` file. There you need to comment the `image:` option and uncomment the `build:` option. Then call docker-compose with: - -```bash -docker-compose up -``` - -This will also start the container right afterwards, so if want it to run in detached mode, you also should use the `-d` flag. - -### Run - -If you already have built the image or want to use one from the registries, you can just start the container and everything else in the compose file in detached mode with: - -```bash -docker-compose up -d -``` - -> **Note:** Don't forget to modify and adjust the compose file to your needs. - -### Use Traefik as Proxy - -As a container user, you probably know about Traefik. 
It is a easy to use reverse proxy for making -containerized app and services available through the web. With the two provided files, -[`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or -[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and -[`docker-compose.override.yml`](docker-compose.override.yml), it is equally easy to deploy -and use Conduit, with a little caveat. If you already took a look at the files, then you should have -seen the `well-known` service, and that is the little caveat. Traefik is simply a proxy and -loadbalancer and is not able to serve any kind of content, but for Conduit to federate, we need to -either expose ports `443` and `8448` or serve two endpoints `.well-known/matrix/client` and -`.well-known/matrix/server`. - -With the service `well-known` we use a single `nginx` container that will serve those two files. - -So...step by step: - -1. Copy [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or -[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and [`docker-compose.override.yml`](docker-compose.override.yml) from the repository and remove `.for-traefik` (or `.with-traefik`) from the filename. -2. Open both files and modify/adjust them to your needs. Meaning, change the `CONDUIT_SERVER_NAME` and the volume host mappings according to your needs. -3. Create the `conduit.toml` config file, an example can be found [here](../configuration.md), or set `CONDUIT_CONFIG=""` and configure Conduit per env vars. -4. Uncomment the `element-web` service if you want to host your own Element Web Client and create a `element_config.json`. -5. Create the files needed by the `well-known` service. 
- - - `./nginx/matrix.conf` (relative to the compose file, you can change this, but then also need to change the volume mapping) - - ```nginx - server { - server_name .; - listen 80 default_server; - - location /.well-known/matrix/server { - return 200 '{"m.server": ".:443"}'; - types { } default_type "application/json; charset=utf-8"; - } - - location /.well-known/matrix/client { - return 200 '{"m.homeserver": {"base_url": "https://."}}'; - types { } default_type "application/json; charset=utf-8"; - add_header "Access-Control-Allow-Origin" *; - } - - location / { - return 404; - } - } - ``` - -6. Run `docker-compose up -d` -7. Connect to your homeserver with your preferred client and create a user. You should do this immediately after starting Conduit, because the first created user is the admin. - - - - -## Voice communication - -In order to make or receive calls, a TURN server is required. Conduit suggests using [Coturn](https://github.com/coturn/coturn) for this purpose, which is also available as a Docker image. Before proceeding with the software installation, it is essential to have the necessary configurations in place. - -### Configuration - -Create a configuration file called `coturn.conf` containing: - -```conf -use-auth-secret -static-auth-secret= -realm= -``` -A common way to generate a suitable alphanumeric secret key is by using `pwgen -s 64 1`. - -These same values need to be set in conduit. You can either modify conduit.toml to include these lines: -``` -turn_uris = ["turn:?transport=udp", "turn:?transport=tcp"] -turn_secret = "" -``` -or append the following to the docker environment variables dependig on which configuration method you used earlier: -```yml -CONDUIT_TURN_URIS: '["turn:?transport=udp", "turn:?transport=tcp"]' -CONDUIT_TURN_SECRET: "" -``` -Restart Conduit to apply these changes. 
- -### Run -Run the [Coturn](https://hub.docker.com/r/coturn/coturn) image using -```bash -docker run -d --network=host -v $(pwd)/coturn.conf:/etc/coturn/turnserver.conf coturn/coturn -``` - -or docker-compose. For the latter, paste the following section into a file called `docker-compose.yml` -and run `docker-compose up -d` in the same directory. - -```yml -version: 3 -services: - turn: - container_name: coturn-server - image: docker.io/coturn/coturn - restart: unless-stopped - network_mode: "host" - volumes: - - ./coturn.conf:/etc/coturn/turnserver.conf -``` - -To understand why the host networking mode is used and explore alternative configuration options, please visit the following link: https://github.com/coturn/coturn/blob/master/docker/coturn/README.md. -For security recommendations see Synapse's [Coturn documentation](https://github.com/matrix-org/synapse/blob/develop/docs/setup/turn/coturn.md#configuration). - diff --git a/docs/deploying/generic.md b/docs/deploying/generic.md deleted file mode 100644 index 307de359..00000000 --- a/docs/deploying/generic.md +++ /dev/null @@ -1,292 +0,0 @@ -# Generic deployment documentation - -> ## Getting help -> -> If you run into any problems while setting up Conduit, write an email to `conduit@koesters.xyz`, ask us -> in `#conduit:fachschaften.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new). - -## Installing Conduit - -Although you might be able to compile Conduit for Windows, we do recommend running it on a Linux server. We therefore -only offer Linux binaries. - -You may simply download the binary that fits your machine. Run `uname -m` to see what you need. 
Now copy the appropriate url: - -**Stable versions:** - -| CPU Architecture | Download stable version | -| ------------------------------------------- | --------------------------------------------------------------- | -| x84_64 / amd64 (Most servers and computers) | [Binary][x84_64-glibc-master] / [.deb][x84_64-glibc-master-deb] | -| armv7 (e.g. Raspberry Pi by default) | [Binary][armv7-glibc-master] / [.deb][armv7-glibc-master-deb] | -| armv8 / aarch64 | [Binary][armv8-glibc-master] / [.deb][armv8-glibc-master-deb] | - -These builds were created on and linked against the glibc version shipped with Debian bullseye. -If you use a system with an older glibc version (e.g. RHEL8), you might need to compile Conduit yourself. - -[x84_64-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit?job=docker:master -[armv7-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit?job=docker:master -[armv8-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit?job=docker:master -[x84_64-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit.deb?job=docker:master -[armv7-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit.deb?job=docker:master -[armv8-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit.deb?job=docker:master - -**Latest versions:** - -| Target | Type | Download | -|-|-|-| -| `x86_64-unknown-linux-musl` | Statically linked Debian package | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl.deb?job=artifacts) | -| `x86_64-unknown-linux-musl` | Statically linked binary | 
[link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/x86_64-unknown-linux-musl?job=artifacts) | -| `aarch64-unknown-linux-musl` | Statically linked binary | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/aarch64-unknown-linux-musl?job=artifacts) | -| `x86_64-unknown-linux-gnu` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-amd64.tar.gz?job=artifacts) | -| `aarch64-unknown-linux-musl` | OCI image | [link](https://gitlab.com/api/v4/projects/famedly%2Fconduit/jobs/artifacts/next/raw/oci-image-arm64v8.tar.gz?job=artifacts) | - -```bash -$ sudo wget -O /usr/local/bin/matrix-conduit -$ sudo chmod +x /usr/local/bin/matrix-conduit -``` - -Alternatively, you may compile the binary yourself. First, install any dependencies: - -```bash -# Debian -$ sudo apt install libclang-dev build-essential - -# RHEL -$ sudo dnf install clang -``` -Then, `cd` into the source tree of conduit-next and run: -```bash -$ cargo build --release -``` - -## Adding a Conduit user - -While Conduit can run as any user it is usually better to use dedicated users for different services. This also allows -you to make sure that the file permissions are correctly set up. - -In Debian or RHEL, you can use this command to create a Conduit user: - -```bash -sudo adduser --system conduit --group --disabled-login --no-create-home -``` - -## Forwarding ports in the firewall or the router - -Conduit uses the ports 443 and 8448 both of which need to be open in the firewall. - -If Conduit runs behind a router or in a container and has a different public IP address than the host system these public ports need to be forwarded directly or indirectly to the port mentioned in the config. 
- -## Optional: Avoid port 8448 - -If Conduit runs behind Cloudflare reverse proxy, which doesn't support port 8448 on free plans, [delegation](https://matrix-org.github.io/synapse/latest/delegate.html) can be set up to have federation traffic routed to port 443: -```apache -# .well-known delegation on Apache - - ErrorDocument 200 '{"m.server": "your.server.name:443"}' - Header always set Content-Type application/json - Header always set Access-Control-Allow-Origin * - -``` -[SRV DNS record](https://spec.matrix.org/latest/server-server-api/#resolving-server-names) delegation is also [possible](https://www.cloudflare.com/en-gb/learning/dns/dns-records/dns-srv-record/). - -## Setting up a systemd service - -Now we'll set up a systemd service for Conduit, so it's easy to start/stop Conduit and set it to autostart when your -server reboots. Simply paste the default systemd service you can find below into -`/etc/systemd/system/conduit.service`. - -```systemd -[Unit] -Description=Conduit Matrix Server -After=network.target - -[Service] -Environment="CONDUIT_CONFIG=/etc/matrix-conduit/conduit.toml" -User=conduit -Group=conduit -Restart=always -ExecStart=/usr/local/bin/matrix-conduit - -[Install] -WantedBy=multi-user.target -``` - -Finally, run - -```bash -$ sudo systemctl daemon-reload -``` - -## Creating the Conduit configuration file - -Now we need to create the Conduit's config file in -`/etc/matrix-conduit/conduit.toml`. Paste in the contents of -[`conduit-example.toml`](../configuration.md) **and take a moment to read it. -You need to change at least the server name.** -You can also choose to use a different database backend, but right now only `rocksdb` and `sqlite` are recommended. - -## Setting the correct file permissions - -As we are using a Conduit specific user we need to allow it to read the config. 
To do that you can run this command on -Debian or RHEL: - -```bash -sudo chown -R root:root /etc/matrix-conduit -sudo chmod 755 /etc/matrix-conduit -``` - -If you use the default database path you also need to run this: - -```bash -sudo mkdir -p /var/lib/matrix-conduit/ -sudo chown -R conduit:conduit /var/lib/matrix-conduit/ -sudo chmod 700 /var/lib/matrix-conduit/ -``` - -## Setting up the Reverse Proxy - -This depends on whether you use Apache, Caddy, Nginx or another web server. - -### Apache - -Create `/etc/apache2/sites-enabled/050-conduit.conf` and copy-and-paste this: - -```apache -# Requires mod_proxy and mod_proxy_http -# -# On Apache instance compiled from source, -# paste into httpd-ssl.conf or httpd.conf - -Listen 8448 - - - -ServerName your.server.name # EDIT THIS - -AllowEncodedSlashes NoDecode -ProxyPass /_matrix/ http://127.0.0.1:6167/_matrix/ timeout=300 nocanon -ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/ - - -``` - -**You need to make some edits again.** When you are done, run - -```bash -# Debian -$ sudo systemctl reload apache2 - -# Installed from source -$ sudo apachectl -k graceful -``` - -### Caddy - -Create `/etc/caddy/conf.d/conduit_caddyfile` and enter this (substitute for your server name). - -```caddy -your.server.name, your.server.name:8448 { - reverse_proxy /_matrix/* 127.0.0.1:6167 -} -``` - -That's it! Just start or enable the service and you're set. 
- -```bash -$ sudo systemctl enable caddy -``` - -### Nginx - -If you use Nginx and not Apache, add the following server section inside the http section of `/etc/nginx/nginx.conf` - -```nginx -server { - listen 443 ssl http2; - listen [::]:443 ssl http2; - listen 8448 ssl http2; - listen [::]:8448 ssl http2; - server_name your.server.name; # EDIT THIS - merge_slashes off; - - # Nginx defaults to only allow 1MB uploads - # Increase this to allow posting large files such as videos - client_max_body_size 20M; - - location /_matrix/ { - proxy_pass http://127.0.0.1:6167; - proxy_set_header Host $http_host; - proxy_buffering off; - proxy_read_timeout 5m; - } - - ssl_certificate /etc/letsencrypt/live/your.server.name/fullchain.pem; # EDIT THIS - ssl_certificate_key /etc/letsencrypt/live/your.server.name/privkey.pem; # EDIT THIS - ssl_trusted_certificate /etc/letsencrypt/live/your.server.name/chain.pem; # EDIT THIS - include /etc/letsencrypt/options-ssl-nginx.conf; -} -``` - -**You need to make some edits again.** When you are done, run - -```bash -$ sudo systemctl reload nginx -``` - -## SSL Certificate - -If you chose Caddy as your web proxy SSL certificates are handled automatically and you can skip this step. - -The easiest way to get an SSL certificate, if you don't have one already, is to [install](https://certbot.eff.org/instructions) `certbot` and run this: - -```bash -# To use ECC for the private key, -# paste into /etc/letsencrypt/cli.ini: -# key-type = ecdsa -# elliptic-curve = secp384r1 - -$ sudo certbot -d your.server.name -``` -[Automated renewal](https://eff-certbot.readthedocs.io/en/stable/using.html#automated-renewals) is usually preconfigured. - -If using Cloudflare, configure instead the edge and origin certificates in dashboard. In case you’re already running a website on the same Apache server, you can just copy-and-paste the SSL configuration from your main virtual host on port 443 into the above-mentioned vhost. - -## You're done! 
- -Now you can start Conduit with: - -```bash -$ sudo systemctl start conduit -``` - -Set it to start automatically when your system boots with: - -```bash -$ sudo systemctl enable conduit -``` - -## How do I know it works? - -You can open [a Matrix client](https://matrix.org/ecosystem/clients), enter your homeserver and try to register. If you are using a registration token, use [Element web](https://app.element.io/), [Nheko](https://matrix.org/ecosystem/clients/nheko/) or [SchildiChat web](https://app.schildi.chat/), as they support this feature. - -You can also use these commands as a quick health check. - -```bash -$ curl https://your.server.name/_matrix/client/versions - -# If using port 8448 -$ curl https://your.server.name:8448/_matrix/client/versions -``` - -- To check if your server can talk with other homeservers, you can use the [Matrix Federation Tester](https://federationtester.matrix.org/). - If you can register but cannot join federated rooms check your config again and also check if the port 8448 is open and forwarded correctly. - -# What's next? - -## Audio/Video calls - -For Audio/Video call functionality see the [TURN Guide](../turn.md). - -## Appservices - -If you want to set up an appservice, take a look at the [Appservice Guide](../appservices.md). diff --git a/docs/deploying/nixos.md b/docs/deploying/nixos.md deleted file mode 100644 index bf9b1a11..00000000 --- a/docs/deploying/nixos.md +++ /dev/null @@ -1,18 +0,0 @@ -# Conduit for NixOS - -Conduit can be acquired by Nix from various places: - -* The `flake.nix` at the root of the repo -* The `default.nix` at the root of the repo -* From Nixpkgs - -The `flake.nix` and `default.nix` do not (currently) provide a NixOS module, so -(for now) [`services.matrix-conduit`][module] from Nixpkgs should be used to -configure Conduit. - -If you want to run the latest code, you should get Conduit from the `flake.nix` -or `default.nix` and set [`services.matrix-conduit.package`][package] -appropriately. 
- -[module]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit -[package]: https://search.nixos.org/options?channel=unstable&query=services.matrix-conduit.package diff --git a/docs/introduction.md b/docs/introduction.md deleted file mode 100644 index da34ab6e..00000000 --- a/docs/introduction.md +++ /dev/null @@ -1,13 +0,0 @@ -# Conduit - -{{#include ../README.md:catchphrase}} - -{{#include ../README.md:body}} - -#### How can I deploy my own? - -- [Deployment options](deploying.md) - -If you want to connect an Appservice to Conduit, take a look at the [appservices documentation](appservices.md). - -{{#include ../README.md:footer}} diff --git a/docs/turn.md b/docs/turn.md deleted file mode 100644 index a61f1b13..00000000 --- a/docs/turn.md +++ /dev/null @@ -1,25 +0,0 @@ -# Setting up TURN/STURN - -## General instructions - -* It is assumed you have a [Coturn server](https://github.com/coturn/coturn) up and running. See [Synapse reference implementation](https://github.com/matrix-org/synapse/blob/develop/docs/turn-howto.md). - -## Edit/Add a few settings to your existing conduit.toml - -``` -# Refer to your Coturn settings. -# `your.turn.url` has to match the REALM setting of your Coturn as well as `transport`. -turn_uris = ["turn:your.turn.url?transport=udp", "turn:your.turn.url?transport=tcp"] - -# static-auth-secret of your turnserver -turn_secret = "ADD SECRET HERE" - -# If you have your TURN server configured to use a username and password -# you can provide these information too. In this case comment out `turn_secret above`! -#turn_username = "" -#turn_password = "" -``` - -## Apply settings - -Restart Conduit. 
diff --git a/engage.toml b/engage.toml index cb284167..30e81d31 100644 --- a/engage.toml +++ b/engage.toml @@ -35,6 +35,25 @@ name = "lychee" group = "versions" script = "lychee --version" +[[task]] +name = "markdownlint" +group = "versions" +script = "markdownlint --version" + +[[task]] +name = "lychee" +group = "lints" +script = "lychee --offline ." + +[[task]] +name = "markdownlint" +group = "lints" +# don't just use 'markdownlint .' because it will lint files that are ignored by +# git +script = """ +git ls-files --cached --others --exclude-standard '*.md' | xargs markdownlint +""" + [[task]] name = "cargo-fmt" group = "lints" @@ -52,17 +71,25 @@ RUSTDOCFLAGS="-D warnings" cargo doc \ """ [[task]] -name = "cargo-clippy" +name = "cargo-clippy/none" +group = "lints" +script = """ +cargo clippy \ + --workspace \ + --all-targets \ + --no-default-features \ + --color=always \ + -- \ + -D warnings +""" + +[[task]] +name = "cargo-clippy/default" group = "lints" script = "cargo clippy --workspace --all-targets --color=always -- -D warnings" [[task]] -name = "lychee" -group = "lints" -script = "lychee --offline docs" - -[[task]] -name = "cargo" +name = "cargo/default" group = "tests" script = """ cargo test \ diff --git a/flake.lock b/flake.lock index 1983d800..4a278e21 100644 --- a/flake.lock +++ b/flake.lock @@ -4,16 +4,17 @@ "inputs": { "crane": "crane", "flake-compat": "flake-compat", - "flake-utils": "flake-utils", + "flake-parts": "flake-parts", + "nix-github-actions": "nix-github-actions", "nixpkgs": "nixpkgs", "nixpkgs-stable": "nixpkgs-stable" }, "locked": { - "lastModified": 1707922053, - "narHash": "sha256-wSZjK+rOXn+UQiP1NbdNn5/UW6UcBxjvlqr2wh++MbM=", + "lastModified": 1752217044, + "narHash": "sha256-5TomR72rn4q+5poQcN6EnanxeXKqJSqWVAoDAFN0lUc=", "owner": "zhaofengli", "repo": "attic", - "rev": "6eabc3f02fae3683bffab483e614bebfcd476b21", + "rev": "24fad0622fc9404c69e83bab7738359c5be4988e", "type": "github" }, "original": { @@ -24,18 +25,12 @@ } }, "crane": 
{ - "inputs": { - "nixpkgs": [ - "attic", - "nixpkgs" - ] - }, "locked": { - "lastModified": 1702918879, - "narHash": "sha256-tWJqzajIvYcaRWxn+cLUB9L9Pv4dQ3Bfit/YjU5ze3g=", + "lastModified": 1751562746, + "narHash": "sha256-smpugNIkmDeicNz301Ll1bD7nFOty97T79m4GUMUczA=", "owner": "ipetkov", "repo": "crane", - "rev": "7195c00c272fdd92fc74e7d5a0a2844b9fadb2fb", + "rev": "aed2020fd3dc26e1e857d4107a5a67a33ab6c1fd", "type": "github" }, "original": { @@ -45,17 +40,12 @@ } }, "crane_2": { - "inputs": { - "nixpkgs": [ - "nixpkgs" - ] - }, "locked": { - "lastModified": 1713721181, - "narHash": "sha256-Vz1KRVTzU3ClBfyhOj8gOehZk21q58T1YsXC30V23PU=", + "lastModified": 1752946753, + "narHash": "sha256-g5uP3jIj+STUcfTJDKYopxnSijs2agRg13H0SGL5iE4=", "owner": "ipetkov", "repo": "crane", - "rev": "55f4939ac59ff8f89c6a4029730a2d49ea09105f", + "rev": "544d09fecc8c2338542c57f3f742f1a0c8c71e13", "type": "github" }, "original": { @@ -73,15 +63,16 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1709619709, - "narHash": "sha256-l6EPVJfwfelWST7qWQeP6t/TDK3HHv5uUB1b2vw4mOQ=", + "lastModified": 1752993983, + "narHash": "sha256-3YKCySMNhFDdHbFiRS4QbEwk0U5l42NMD1scDtniESY=", "owner": "nix-community", "repo": "fenix", - "rev": "c8943ea9e98d41325ff57d4ec14736d330b321b2", + "rev": "62105e0745d7450976b26dbd1497b8cbe15eb9ff", "type": "github" }, "original": { "owner": "nix-community", + "ref": "main", "repo": "fenix", "type": "github" } @@ -89,11 +80,11 @@ "flake-compat": { "flake": false, "locked": { - "lastModified": 1673956053, - "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "lastModified": 1747046372, + "narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=", "owner": "edolstra", "repo": "flake-compat", - "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", + "rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885", "type": "github" }, "original": { @@ -105,74 +96,104 @@ "flake-compat_2": { "flake": false, "locked": { - "lastModified": 
1696426674, - "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", + "lastModified": 1747046372, + "narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=", "owner": "edolstra", "repo": "flake-compat", - "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885", "type": "github" }, "original": { "owner": "edolstra", + "ref": "master", "repo": "flake-compat", "type": "github" } }, + "flake-parts": { + "inputs": { + "nixpkgs-lib": [ + "attic", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1751413152, + "narHash": "sha256-Tyw1RjYEsp5scoigs1384gIg6e0GoBVjms4aXFfRssQ=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "77826244401ea9de6e3bac47c2db46005e1f30b5", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "flake-parts", + "type": "github" + } + }, "flake-utils": { - "locked": { - "lastModified": 1667395993, - "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, - "flake-utils_2": { "inputs": { "systems": "systems" }, "locked": { - "lastModified": 1709126324, - "narHash": "sha256-q6EQdSeUZOG26WelxqkmR7kArjgWCdw5sfJVHPH/7j8=", + "lastModified": 1731533236, + "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", "owner": "numtide", "repo": "flake-utils", - "rev": "d465f4819400de7c8d874d50b982301f28a84605", + "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", "type": "github" }, "original": { "owner": "numtide", + "ref": "main", "repo": "flake-utils", "type": "github" } }, "nix-filter": { "locked": { - "lastModified": 1705332318, - "narHash": "sha256-kcw1yFeJe9N4PjQji9ZeX47jg0p9A0DuU4djKvg1a7I=", + "lastModified": 1731533336, + "narHash": "sha256-oRam5PS1vcrr5UPgALW0eo1m/5/pls27Z/pabHNy2Ms=", "owner": "numtide", "repo": 
"nix-filter", - "rev": "3449dc925982ad46246cfc36469baf66e1b64f17", + "rev": "f7653272fd234696ae94229839a99b73c9ab7de0", "type": "github" }, "original": { "owner": "numtide", + "ref": "main", "repo": "nix-filter", "type": "github" } }, + "nix-github-actions": { + "inputs": { + "nixpkgs": [ + "attic", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1737420293, + "narHash": "sha256-F1G5ifvqTpJq7fdkT34e/Jy9VCyzd5XfJ9TO8fHhJWE=", + "owner": "nix-community", + "repo": "nix-github-actions", + "rev": "f4158fa080ef4503c8f4c820967d946c2af31ec9", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "nix-github-actions", + "type": "github" + } + }, "nixpkgs": { "locked": { - "lastModified": 1702539185, - "narHash": "sha256-KnIRG5NMdLIpEkZTnN5zovNYc0hhXjAgv6pfd5Z4c7U=", + "lastModified": 1751949589, + "narHash": "sha256-mgFxAPLWw0Kq+C8P3dRrZrOYEQXOtKuYVlo9xvPntt8=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "aa9d4729cbc99dabacb50e3994dcefb3ea0f7447", + "rev": "9b008d60392981ad674e04016d25619281550a9d", "type": "github" }, "original": { @@ -184,27 +205,27 @@ }, "nixpkgs-stable": { "locked": { - "lastModified": 1702780907, - "narHash": "sha256-blbrBBXjjZt6OKTcYX1jpe9SRof2P9ZYWPzq22tzXAA=", + "lastModified": 1751741127, + "narHash": "sha256-t75Shs76NgxjZSgvvZZ9qOmz5zuBE8buUaYD28BMTxg=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "1e2e384c5b7c50dbf8e9c441a9e58d85f408b01f", + "rev": "29e290002bfff26af1db6f64d070698019460302", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-23.11", + "ref": "nixos-25.05", "repo": "nixpkgs", "type": "github" } }, "nixpkgs_2": { "locked": { - "lastModified": 1709479366, - "narHash": "sha256-n6F0n8UV6lnTZbYPl1A9q1BS0p4hduAv1mGAP17CVd0=", + "lastModified": 1752950548, + "narHash": "sha256-NS6BLD0lxOrnCiEOcvQCDVPXafX1/ek1dfJHX1nUIzc=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "b8697e57f10292a6165a20f03d2f42920dfaf973", + "rev": "c87b95e25065c028d31a94f06a62927d18763fdf", "type": "github" }, 
"original": { @@ -214,25 +235,43 @@ "type": "github" } }, + "rocksdb": { + "flake": false, + "locked": { + "lastModified": 1752084860, + "narHash": "sha256-mKh6zsmxsiUix4LX+npiytmKvLbo6WNA9y4Ns/EY+bE=", + "owner": "facebook", + "repo": "rocksdb", + "rev": "410c5623195ecbe4699b9b5a5f622c7325cec6fe", + "type": "github" + }, + "original": { + "owner": "facebook", + "ref": "v10.4.2", + "repo": "rocksdb", + "type": "github" + } + }, "root": { "inputs": { "attic": "attic", "crane": "crane_2", "fenix": "fenix", "flake-compat": "flake-compat_2", - "flake-utils": "flake-utils_2", + "flake-utils": "flake-utils", "nix-filter": "nix-filter", - "nixpkgs": "nixpkgs_2" + "nixpkgs": "nixpkgs_2", + "rocksdb": "rocksdb" } }, "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1709571018, - "narHash": "sha256-ISFrxHxE0J5g7lDAscbK88hwaT5uewvWoma9TlFmRzM=", + "lastModified": 1752913824, + "narHash": "sha256-kRpDlijAr4p5VmcPSRw2mfhaBZ4cE3EDWzqLDIbASgA=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "9f14343f9ee24f53f17492c5f9b653427e2ad15e", + "rev": "ed193af36937d2fd4bb14a815ec589875c5c7304", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 114e221c..5d62b734 100644 --- a/flake.nix +++ b/flake.nix @@ -1,312 +1,130 @@ { + # Keep sorted inputs = { - nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable"; - flake-utils.url = "github:numtide/flake-utils"; - nix-filter.url = "github:numtide/nix-filter"; - flake-compat = { - url = "github:edolstra/flake-compat"; - flake = false; - }; - - fenix = { - url = "github:nix-community/fenix"; - inputs.nixpkgs.follows = "nixpkgs"; - }; - crane = { - url = "github:ipetkov/crane?ref=master"; - inputs.nixpkgs.follows = "nixpkgs"; - }; attic.url = "github:zhaofengli/attic?ref=main"; + crane.url = "github:ipetkov/crane?ref=master"; + fenix = { url = "github:nix-community/fenix?ref=main"; inputs.nixpkgs.follows = "nixpkgs"; }; + flake-compat = { url = "github:edolstra/flake-compat?ref=master"; flake = 
false; }; + flake-utils.url = "github:numtide/flake-utils?ref=main"; + nix-filter.url = "github:numtide/nix-filter?ref=main"; + nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable"; + rocksdb = { url = "github:facebook/rocksdb?ref=v10.4.2"; flake = false; }; }; - outputs = - { self - , nixpkgs - , flake-utils - , nix-filter - - , fenix - , crane - , ... - }: flake-utils.lib.eachDefaultSystem (system: + outputs = inputs: let - pkgsHost = nixpkgs.legacyPackages.${system}; - - # Nix-accessible `Cargo.toml` - cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml); - - # The Rust toolchain to use - toolchain = fenix.packages.${system}.fromToolchainFile { - file = ./rust-toolchain.toml; - - # See also `rust-toolchain.toml` - sha256 = "sha256-SXRtAuO4IqNOQq+nLbrsDFbVk+3aVA8NNpSZsKlVH/8="; + rust-manifest = builtins.fetchurl { + # Keep version in sync with rust-toolchain.toml + url = "https://static.rust-lang.org/dist/channel-rust-1.88.0.toml"; + sha256 = "sha256-Qxt8XAuaUR2OMdKbN4u8dBJOhSHxS+uS06Wl9+flVEk="; }; - builder = pkgs: - ((crane.mkLib pkgs).overrideToolchain toolchain).buildPackage; + # Keep sorted + mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: { + craneLib = + (inputs.crane.mkLib pkgs).overrideToolchain (_: self.toolchain); - nativeBuildInputs = pkgs: [ - # bindgen needs the build platform's libclang. Apparently due to - # "splicing weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't - # quite do the right thing here. 
- pkgs.pkgsBuildHost.rustPlatform.bindgenHook - ]; + default = self.callPackage ./nix/pkgs/default {}; - rocksdb' = pkgs: - let - version = "9.1.0"; - in - pkgs.rocksdb.overrideAttrs (old: { - inherit version; - src = pkgs.fetchFromGitHub { - owner = "facebook"; - repo = "rocksdb"; - rev = "v${version}"; - hash = "sha256-vRPyrXkXVVhP56n5FVYef8zbIsnnanQSpElmQLZ7mh8="; + inherit inputs; + + # Return a new scope with overrides applied to the 'default' package + overrideDefaultPackage = args: self.overrideScope (final: prev: { + default = prev.default.override args; + }); + + rocksdb = self.callPackage ./nix/pkgs/rocksdb { + inherit (pkgs) rocksdb; }; - }); - env = pkgs: { - CONDUIT_VERSION_EXTRA = self.shortRev or self.dirtyShortRev; - ROCKSDB_INCLUDE_DIR = "${rocksdb' pkgs}/include"; - ROCKSDB_LIB_DIR = "${rocksdb' pkgs}/lib"; - } - // pkgs.lib.optionalAttrs pkgs.stdenv.hostPlatform.isStatic { - ROCKSDB_STATIC = ""; - } - // { - CARGO_BUILD_RUSTFLAGS = let inherit (pkgs) lib stdenv; in - lib.concatStringsSep " " ([] - ++ lib.optionals - # This disables PIE for static builds, which isn't great in terms - # of security. Unfortunately, my hand is forced because nixpkgs' - # `libstdc++.a` is built without `-fPIE`, which precludes us from - # leaving PIE enabled. - stdenv.hostPlatform.isStatic - ["-C" "relocation-model=static"] - ++ lib.optionals - (stdenv.buildPlatform.config != stdenv.hostPlatform.config) - ["-l" "c"] - ++ lib.optionals - # This check has to match the one [here][0]. We only need to set - # these flags when using a different linker. Don't ask me why, - # though, because I don't know. All I know is it breaks otherwise. - # - # [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L37-L40 - ( - # Nixpkgs doesn't check for x86_64 here but we do, because I - # observed a failure building statically for x86_64 without - # including it here. Linkers are weird. 
- (stdenv.hostPlatform.isAarch64 || stdenv.hostPlatform.isx86_64) - && stdenv.hostPlatform.isStatic - && !stdenv.isDarwin - && !stdenv.cc.bintools.isLLVM + shell = self.callPackage ./nix/shell.nix {}; + + # The Rust toolchain to use + # Using fromManifestFile and parsing the toolchain file with importTOML + # instead of fromToolchainFile to avoid IFD + toolchain = let + toolchainFile = pkgs.lib.importTOML ./rust-toolchain.toml; + defaultProfileComponents = [ + "rustc" + "cargo" + "rust-docs" + "rustfmt" + "clippy" + ]; + components = defaultProfileComponents ++ + toolchainFile.toolchain.components; + targets = toolchainFile.toolchain.targets; + fenix = inputs.fenix.packages.${pkgs.stdenv.buildPlatform.system}; + + nativeToolchain = (fenix.fromManifestFile rust-manifest) + .withComponents components; + crossComponents = builtins.map + (target: + (fenix.targets.${target}.fromManifestFile rust-manifest) + .rust-std) + targets; + in + fenix.combine ([nativeToolchain] ++ crossComponents); + + website-root = self.callPackage ./nix/pkgs/website-root {}; + }); + in + inputs.flake-utils.lib.eachDefaultSystem (system: + let + pkgs = import inputs.nixpkgs { + inherit system; + + # Some users find it useful to set this on their Nixpkgs instance and + # we want to support that use case, so we set it here too to help us + # test/ensure that this works. + config.allowAliases = false; + }; + in + { + packages = { + default = (mkScope pkgs).default; + website-root = (mkScope pkgs).website-root; + } + // + builtins.listToAttrs + (builtins.concatLists + (builtins.map + (crossSystem: + let + binaryName = "static-${crossSystem}"; + pkgsCrossStatic = + (import inputs.nixpkgs { + inherit system; + crossSystem = { + config = crossSystem; + }; + + # Some users find it useful to set this on their Nixpkgs + # instance and we want to support that use case, so we set + # it here too to help us test/ensure that this works. 
+ config.allowAliases = false; + }).pkgsStatic; + in + [ + # An output for a statically-linked binary + { + name = binaryName; + value = (mkScope pkgsCrossStatic).default; + } + ] ) [ - "-l" - "stdc++" - "-L" - "${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib" - ] - ); - } - - # What follows is stolen from [here][0]. Its purpose is to properly - # configure compilers and linkers for various stages of the build, and - # even covers the case of build scripts that need native code compiled and - # run on the build platform (I think). - # - # [0]: https://github.com/NixOS/nixpkgs/blob/5cdb38bb16c6d0a38779db14fcc766bc1b2394d6/pkgs/build-support/rust/lib/default.nix#L57-L80 - // ( - let - inherit (pkgs.rust.lib) envVars; - in - pkgs.lib.optionalAttrs - (pkgs.stdenv.targetPlatform.rust.rustcTarget - != pkgs.stdenv.hostPlatform.rust.rustcTarget) - ( - let - inherit (pkgs.stdenv.targetPlatform.rust) cargoEnvVarTarget; - in - { - "CC_${cargoEnvVarTarget}" = envVars.ccForTarget; - "CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget; - "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = - envVars.linkerForTarget; - } - ) - // ( - let - inherit (pkgs.stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget; - in - { - "CC_${cargoEnvVarTarget}" = envVars.ccForHost; - "CXX_${cargoEnvVarTarget}" = envVars.cxxForHost; - "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForHost; - CARGO_BUILD_TARGET = rustcTarget; - } - ) - // ( - let - inherit (pkgs.stdenv.buildPlatform.rust) cargoEnvVarTarget; - in - { - "CC_${cargoEnvVarTarget}" = envVars.ccForBuild; - "CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild; - "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.linkerForBuild; - HOST_CC = "${pkgs.pkgsBuildHost.stdenv.cc}/bin/cc"; - HOST_CXX = "${pkgs.pkgsBuildHost.stdenv.cc}/bin/c++"; - } - )); - - package = pkgs: builder pkgs { - src = nix-filter { - root = ./.; - include = [ - "src" - "Cargo.toml" - "Cargo.lock" - ]; - }; - - # This is redundant with CI - doCheck = false; - - 
env = env pkgs; - nativeBuildInputs = nativeBuildInputs pkgs; - - meta.mainProgram = cargoToml.package.name; - }; - - mkOciImage = pkgs: package: - pkgs.dockerTools.buildImage { - name = package.pname; - tag = "next"; - copyToRoot = [ - pkgs.dockerTools.caCertificates - ]; - config = { - # Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT) - # are handled as expected - Entrypoint = [ - "${pkgs.lib.getExe' pkgs.tini "tini"}" - "--" - ]; - Cmd = [ - "${pkgs.lib.getExe package}" - ]; - }; - }; - in - { - packages = { - default = package pkgsHost; - oci-image = mkOciImage pkgsHost self.packages.${system}.default; - - book = - let - package = self.packages.${system}.default; - in - pkgsHost.stdenv.mkDerivation { - pname = "${package.pname}-book"; - version = package.version; - - src = nix-filter { - root = ./.; - include = [ - "book.toml" - "conduit-example.toml" - "README.md" - "debian/README.md" - "docs" - ]; - }; - - nativeBuildInputs = (with pkgsHost; [ - mdbook - ]); - - buildPhase = '' - mdbook build - mv public $out - ''; - }; - } - // - builtins.listToAttrs - (builtins.concatLists - (builtins.map - (crossSystem: - let - binaryName = "static-${crossSystem}"; - pkgsCrossStatic = - (import nixpkgs { - inherit system; - crossSystem = { - config = crossSystem; - }; - }).pkgsStatic; - in - [ - # An output for a statically-linked binary - { - name = binaryName; - value = package pkgsCrossStatic; - } - - # An output for an OCI image based on that binary - { - name = "oci-image-${crossSystem}"; - value = mkOciImage - pkgsCrossStatic - self.packages.${system}.${binaryName}; - } + "x86_64-unknown-linux-musl" + "aarch64-unknown-linux-musl" ] ) - [ - "x86_64-unknown-linux-musl" - "aarch64-unknown-linux-musl" - ] - ) - ); + ); - devShells.default = pkgsHost.mkShell { - env = env pkgsHost // { - # Rust Analyzer needs to be able to find the path to default crate - # sources, and it can read this environment variable to do so. 
The - # `rust-src` component is required in order for this to work. - RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library"; - }; - - # Development tools - nativeBuildInputs = nativeBuildInputs pkgsHost ++ [ - # Always use nightly rustfmt because most of its options are unstable - # - # This needs to come before `toolchain` in this list, otherwise - # `$PATH` will have stable rustfmt instead. - fenix.packages.${system}.latest.rustfmt - - toolchain - ] ++ (with pkgsHost; [ - engage - - # Needed for producing Debian packages - cargo-deb - - # Needed for Complement - go - olm - - # Needed for our script for Complement - jq - - # Needed for finding broken markdown links - lychee - - # Useful for editing the book locally - mdbook - ]); - }; - }); + devShells.default = (mkScope pkgs).shell; + } + ) + // + { + nixosModules.default = import ./nix/modules/default inputs; + }; } diff --git a/nix/modules/default/default.nix b/nix/modules/default/default.nix new file mode 100644 index 00000000..38468e44 --- /dev/null +++ b/nix/modules/default/default.nix @@ -0,0 +1,154 @@ +inputs: + +{ config +, lib +, pkgs +, ... 
+}: + +let + inherit (lib) types; + + cfg = config.services.grapevine; + configFile = format.generate "config.toml" cfg.settings; + validateConfig = file: pkgs.runCommand "grapevine-checked-config" {} '' + ${lib.getExe cfg.package} check-config -sc ${lib.escapeShellArg file} + ln -s ${lib.escapeShellArg file} "$out" + ''; + format = pkgs.formats.toml {}; +in + +{ + options.services.grapevine = { + enable = lib.mkEnableOption "grapevine"; + package = lib.mkPackageOption + inputs.self.packages.${pkgs.stdenv.hostPlatform.system} + "grapevine" + { + default = "default"; + pkgsText = "inputs.grapevine.packages.\${pkgs.stdenv.hostPlatform.system}"; + }; + + settings = lib.mkOption { + type = types.submodule { + freeformType = format.type; + options = { + conduit_compat = lib.mkOption { + type = types.bool; + description = '' + Whether to operate as a drop-in replacement for Conduit. + ''; + default = false; + }; + database = { + backend = lib.mkOption { + type = types.nonEmptyStr; + description = '' + The database backend to use. + ''; + default = "rocksdb"; + }; + path = lib.mkOption { + type = types.nonEmptyStr; + readOnly = true; + description = '' + The path to store database files in. + + Note that this is read-only because this module makes use of + systemd's `StateDirectory` option. + ''; + default = if cfg.settings.conduit_compat + then "/var/lib/matrix-conduit/database" + else "/var/lib/grapevine/database"; + }; + }; + media.backend = { + type = lib.mkOption { + type = types.nonEmptyStr; + readOnly = true; + description = '' + The media backend to use. + + Note that this is read-only because `filesystem` is currently + the only valid option. + ''; + default = "filesystem"; + }; + path = lib.mkOption { + type = types.nonEmptyStr; + readOnly = true; + description = '' + The path to store database files in. + + Note that this is read-only because this module makes use of + systemd's `StateDirectory` option. 
+ ''; + default = if cfg.settings.conduit_compat + then "/var/lib/matrix-conduit/media" + else "/var/lib/grapevine/media"; + }; + }; + listen = lib.mkOption { + type = types.listOf format.type; + description = '' + List of places to listen for incoming connections. + ''; + default = [ + { + type = "tcp"; + address = "::1"; + port = 6167; + } + ]; + }; + }; + }; + default = {}; + description = '' + The TOML configuration file is generated from this attribute set. + ''; + }; + }; + + config = lib.mkIf cfg.enable { + systemd.services.grapevine = { + description = "Grapevine (Matrix homeserver)"; + wantedBy = [ "multi-user.target" ]; + + # Keep sorted + serviceConfig = { + DynamicUser = true; + ExecStart = "${lib.getExe cfg.package} serve --config ${validateConfig configFile}"; + LockPersonality = true; + MemoryDenyWriteExecute = true; + PrivateDevices = true; + PrivateMounts = true; + PrivateUsers = true; + ProtectClock = true; + ProtectControlGroups = true; + ProtectHostname = true; + ProtectKernelLogs = true; + ProtectKernelModules = true; + ProtectKernelTunables = true; + Restart = "on-failure"; + RestartSec = 10; + RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ]; + RestrictNamespaces = true; + RestrictRealtime = true; + StartLimitBurst = 5; + StateDirectory = if cfg.settings.conduit_compat + then "matrix-conduit" + else "grapevine"; + StateDirectoryMode = "0700"; + SystemCallArchitectures = "native"; + SystemCallFilter = [ "@system-service" "~@privileged" ]; + TimeoutStartSec = "infinity"; + Type = "notify"; + UMask = "077"; + User = if cfg.settings.conduit_compat + then "conduit" + else "grapevine"; + }; + }; + }; +} diff --git a/nix/pkgs/default/cross-compilation-env.nix b/nix/pkgs/default/cross-compilation-env.nix new file mode 100644 index 00000000..b10b9a26 --- /dev/null +++ b/nix/pkgs/default/cross-compilation-env.nix @@ -0,0 +1,95 @@ +# Keep sorted +{ lib +, pkgsBuildHost +, rust +, snappy +, stdenv +}: + +lib.optionalAttrs 
stdenv.hostPlatform.isStatic { + ROCKSDB_STATIC = ""; +} +// +{ + CARGO_BUILD_RUSTFLAGS = + lib.concatStringsSep + " " + ([] + # This disables PIE for static builds, which isn't great in terms of + # security. Unfortunately, my hand is forced because nixpkgs' + # `libstdc++.a` is built without `-fPIE`, which precludes us from + # leaving PIE enabled. + ++ lib.optionals + stdenv.hostPlatform.isStatic + [ "-C" "relocation-model=static" ] + + # I'm not sure why any of this is necessary but it is so *shrug* + ++ lib.optionals + (stdenv.buildPlatform.config != stdenv.hostPlatform.config) + [ + "-l" + "c" + + "-l" + "stdc++" + "-L" + "${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib" + + "-l" + "snappy" + "-L" + "${snappy}/lib" + ] + ); +} + +# What follows is stolen from [here][0]. Its purpose is to properly configure +# compilers and linkers for various stages of the build, and even covers the +# case of build scripts that need native code compiled and run on the build +# platform (I think). 
+# +# [0]: https://github.com/NixOS/nixpkgs/blob/2768c7d042a37de65bb1b5b3268fc987e534c49d/pkgs/build-support/rust/lib/default.nix#L45-L68 +// +( + let + inherit (rust.lib) envVars; + in + lib.optionalAttrs + (stdenv.targetPlatform.rust.rustcTarget + != stdenv.hostPlatform.rust.rustcTarget) + ( + let + inherit (stdenv.targetPlatform.rust) cargoEnvVarTarget; + in + { + "CC_${cargoEnvVarTarget}" = envVars.ccForTarget; + "CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget; + "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.ccForTarget; + } + ) + // + ( + let + inherit (stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget; + in + { + "CC_${cargoEnvVarTarget}" = envVars.ccForHost; + "CXX_${cargoEnvVarTarget}" = envVars.cxxForHost; + "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.ccForHost; + CARGO_BUILD_TARGET = rustcTarget; + } + ) + // + ( + let + inherit (stdenv.buildPlatform.rust) cargoEnvVarTarget; + in + { + "CC_${cargoEnvVarTarget}" = envVars.ccForBuild; + "CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild; + "CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.ccForBuild; + HOST_CC = "${pkgsBuildHost.stdenv.cc}/bin/cc"; + HOST_CXX = "${pkgsBuildHost.stdenv.cc}/bin/c++"; + } + ) +) diff --git a/nix/pkgs/default/default.nix b/nix/pkgs/default/default.nix new file mode 100644 index 00000000..60b85a38 --- /dev/null +++ b/nix/pkgs/default/default.nix @@ -0,0 +1,109 @@ +# Dependencies (keep sorted) +{ craneLib +, inputs +, jq +, lib +, pkgsBuildHost +, rocksdb +, rust +, snappy +, stdenv + +# Options (keep sorted) +, default-features ? true +, features ? [] +, profile ? "release" +, version-extra ? 
inputs.self.shortRev + or inputs.self.dirtyShortRev + or null, +}: + +let + cargoManifest = lib.importTOML "${inputs.self}/Cargo.toml"; + + buildDepsOnlyEnv = + { + NIX_OUTPATH_USED_AS_RANDOM_SEED = "randomseed"; + CARGO_PROFILE = profile; + ROCKSDB_INCLUDE_DIR = "${rocksdb}/include"; + ROCKSDB_LIB_DIR = "${rocksdb}/lib"; + } + // + (import ./cross-compilation-env.nix { + # Keep sorted + inherit + lib + pkgsBuildHost + rust + snappy + stdenv; + }); + + buildPackageEnv = + (lib.optionalAttrs (version-extra != null) { + GRAPEVINE_VERSION_EXTRA = version-extra; + }) + // buildDepsOnlyEnv; + + commonAttrs = { + # Reading from cargoManifest directly instead of using + # createNameFromCargoToml to avoid IFD + pname = cargoManifest.package.name; + version = cargoManifest.package.version; + + src = let filter = inputs.nix-filter.lib; in filter { + root = inputs.self; + + # Keep sorted + include = [ + ".cargo/config.toml" + "Cargo.lock" + "Cargo.toml" + "src" + ]; + }; + + dontStrip = profile != "release"; + + nativeBuildInputs = [ + # bindgen needs the build platform's libclang. Apparently due to "splicing + # weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't quite do the + # right thing here. + pkgsBuildHost.rustPlatform.bindgenHook + + # We don't actually depend on `jq`, but crane's `buildPackage` does, but + # its `buildDepsOnly` doesn't. This causes those two derivations to have + # differing values for `NIX_CFLAGS_COMPILE`, which contributes to spurious + # rebuilds of bindgen and its depedents. 
+ jq + ]; + + # Opt out of crane's automagic cross support + doIncludeCrossToolchainEnv = false; + + # This is redundant with CI + doCheck = false; + }; +in + +craneLib.buildPackage (commonAttrs // { + cargoArtifacts = craneLib.buildDepsOnly (commonAttrs // { + env = buildDepsOnlyEnv; + }); + + cargoExtraArgs = "--locked " + + lib.optionalString + (!default-features) + "--no-default-features " + + lib.optionalString + (features != []) + "--features " + (builtins.concatStringsSep "," features); + + env = buildPackageEnv; + + passthru = { + env = buildPackageEnv; + }; + + meta.mainProgram = commonAttrs.pname; +}) diff --git a/nix/pkgs/rocksdb/default.nix b/nix/pkgs/rocksdb/default.nix new file mode 100644 index 00000000..6292e977 --- /dev/null +++ b/nix/pkgs/rocksdb/default.nix @@ -0,0 +1,40 @@ +# Dependencies (keep sorted) +{ inputs +, lib +, rocksdb +}: + +let + rocksdb' = (rocksdb.overrideAttrs (old: { + src = inputs.rocksdb; + version = lib.removePrefix + "v" + (builtins.fromJSON + (builtins.readFile ../../../flake.lock) + ).nodes.rocksdb.original.ref; + })).override { + enableLiburing = false; + }; + + cVersion = rocksdb'.version; + + rustVersion = builtins.elemAt + (lib.splitString + "+" + (lib.findSingle + (x: x.name == "rust-librocksdb-sys") + (builtins.throw "Multiple rust-librocksdb-sys versions in Cargo.lock") + (builtins.throw "No rust-librocksdb-sys versions in Cargo.lock") + (builtins.fromTOML (builtins.readFile ../../../Cargo.lock)).package + ).version + ) + 1; +in + +if cVersion == rustVersion + then rocksdb' + else builtins.throw + (builtins.concatStringsSep " " [ + "C version (${cVersion}) and Rust version (${rustVersion}) of RocksDB do" + "not match" + ]) diff --git a/nix/pkgs/website-root/default.nix b/nix/pkgs/website-root/default.nix new file mode 100644 index 00000000..bbb93a25 --- /dev/null +++ b/nix/pkgs/website-root/default.nix @@ -0,0 +1,28 @@ +# Keep sorted +{ inputs +, lib +, mdbook +, stdenv +}: + +stdenv.mkDerivation { + name = 
"website-root"; + + src = let filter = inputs.nix-filter.lib; in filter { + root = inputs.self; + + # Keep sorted + include = [ + "book" + "book.toml" + ]; + }; + + buildPhase = '' + ${lib.getExe mdbook} build + ''; + + installPhase = '' + mv target/book $out + ''; +} diff --git a/nix/shell.nix b/nix/shell.nix new file mode 100644 index 00000000..34e9d5c2 --- /dev/null +++ b/nix/shell.nix @@ -0,0 +1,55 @@ +# Keep sorted +{ cargo-insta +, default +, engage +, findutils +, inputs +, jq +, lychee +, markdownlint-cli +, mdbook +, mkShell +, ripgrep +, stdenv +, toolchain +}: + +mkShell { + env = default.env // { + # Rust Analyzer needs to be able to find the path to default crate + # sources, and it can read this environment variable to do so. The + # `rust-src` component is required in order for this to work. + RUST_SRC_PATH = "${toolchain}/lib/rustlib/src/rust/library"; + }; + + # Development tools + nativeBuildInputs = [ + # Always use nightly rustfmt because most of its options are unstable + # + # This needs to come before `toolchain` in this list, otherwise + # `$PATH` will have stable rustfmt instead. + inputs.fenix.packages.${stdenv.buildPlatform.system}.latest.rustfmt + + # Keep sorted + cargo-insta + engage + findutils + jq + lychee + markdownlint-cli + mdbook + ripgrep + toolchain + ] + ++ + default.nativeBuildInputs + ++ + default.propagatedBuildInputs + ++ + default.buildInputs; + + shellHook = '' + # Workaround for + unset TMPDIR + ''; +} diff --git a/rust-toolchain.toml b/rust-toolchain.toml index f7a94340..b892bc27 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -2,7 +2,6 @@ # # Other files that need upkeep when this changes: # -# * `.gitlab-ci.yml` # * `Cargo.toml` # * `flake.nix` # @@ -10,7 +9,7 @@ # If you're having trouble making the relevant changes, bug a maintainer. 
[toolchain] -channel = "1.75.0" +channel = "1.88.0" components = [ # For rust-analyzer "rust-src", @@ -18,5 +17,6 @@ components = [ targets = [ "x86_64-unknown-linux-gnu", "x86_64-unknown-linux-musl", + "aarch64-unknown-linux-gnu", "aarch64-unknown-linux-musl", ] diff --git a/rustfmt.toml b/rustfmt.toml index 739b454f..a0729a75 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,2 +1,18 @@ -unstable_features = true -imports_granularity="Crate" +edition = "2021" + +condense_wildcard_suffixes = true +error_on_line_overflow = true +format_code_in_doc_comments = true +format_macro_bodies = true +format_macro_matchers = true +format_strings = true +group_imports = "StdExternalCrate" +hex_literal_case = "Upper" +imports_granularity = "Crate" +max_width = 80 +newline_style = "Unix" +reorder_impl_items = true +use_field_init_shorthand = true +use_small_heuristics = "Off" +use_try_shorthand = true +wrap_comments = true diff --git a/src/api.rs b/src/api.rs new file mode 100644 index 00000000..b19322b8 --- /dev/null +++ b/src/api.rs @@ -0,0 +1,5 @@ +pub(crate) mod appservice_server; +pub(crate) mod client_server; +pub(crate) mod ruma_wrapper; +pub(crate) mod server_server; +pub(crate) mod well_known; diff --git a/src/api/appservice_server.rs b/src/api/appservice_server.rs index 3ec7a66e..117d1914 100644 --- a/src/api/appservice_server.rs +++ b/src/api/appservice_server.rs @@ -1,27 +1,28 @@ -use crate::{services, utils, Error, Result}; +use std::{fmt::Debug, mem, time::Duration}; + use bytes::BytesMut; use ruma::api::{ - appservice::Registration, IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, + appservice::Registration, IncomingResponse, MatrixVersion, OutgoingRequest, + SendAccessToken, }; -use std::{fmt::Debug, mem, time::Duration}; use tracing::warn; +use crate::{services, utils, Error, Result}; + /// Sends a request to an appservice /// -/// Only returns None if there is no url specified in the appservice registration file +/// Only returns None if there 
is no url specified in the appservice +/// registration file #[tracing::instrument(skip(request))] -pub(crate) async fn send_request( +pub(crate) async fn send_request( registration: Registration, request: T, ) -> Result> where - T: Debug, + T: OutgoingRequest + Debug, { - let destination = match registration.url { - Some(url) => url, - None => { - return Ok(None); - } + let Some(destination) = registration.url else { + return Ok(None); }; let hs_token = registration.hs_token.as_str(); @@ -33,7 +34,7 @@ where &[MatrixVersion::V1_0], ) .unwrap() - .map(|body| body.freeze()); + .map(BytesMut::freeze); let mut parts = http_request.uri().clone().into_parts(); let old_path_and_query = parts.path_and_query.unwrap().as_str().to_owned(); @@ -44,38 +45,36 @@ where }; parts.path_and_query = Some( - (old_path_and_query + symbol + "access_token=" + hs_token) + format!("{old_path_and_query}{symbol}access_token={hs_token}") .parse() .unwrap(), ); - *http_request.uri_mut() = parts.try_into().expect("our manipulation is always valid"); + *http_request.uri_mut() = + parts.try_into().expect("our manipulation is always valid"); let mut reqwest_request = reqwest::Request::try_from(http_request)?; *reqwest_request.timeout_mut() = Some(Duration::from_secs(30)); let url = reqwest_request.url().clone(); - let mut response = match services() + let mut response = services() .globals .default_client() .execute(reqwest_request) .await - { - Ok(r) => r, - Err(e) => { + .inspect_err(|error| { warn!( - "Could not send request to appservice {:?} at {}: {}", - registration.id, destination, e + %error, + appservice = registration.id, + %destination, + "Could not send request to appservice", ); - return Err(e.into()); - } - }; + })?; // reqwest::Response -> http::Response conversion let status = response.status(); - let mut http_response_builder = http::Response::builder() - .status(status) - .version(response.version()); + let mut http_response_builder = + 
http::Response::builder().status(status).version(response.version()); mem::swap( response.headers_mut(), http_response_builder @@ -83,18 +82,22 @@ where .expect("http::response::Builder is usable"), ); - let body = response.bytes().await.unwrap_or_else(|e| { - warn!("server error: {}", e); + // TODO: handle timeout + let body = response.bytes().await.unwrap_or_else(|error| { + warn!(%error, "Server error"); Vec::new().into() - }); // TODO: handle timeout + }); if status != 200 { warn!( - "Appservice returned bad response {} {}\n{}\n{:?}", - destination, - status, - url, - utils::string_from_bytes(&body) + appservice = %destination, + %status, + %url, + body = %utils::dbg_truncate_str( + String::from_utf8_lossy(&body).as_ref(), + 100, + ), + "Appservice returned bad response", ); } @@ -104,10 +107,12 @@ where .expect("reqwest body is valid http body"), ); - response.map(Some).map_err(|_| { + response.map(Some).map_err(|error| { warn!( - "Appservice returned invalid response bytes {}\n{}", - destination, url + %error, + appservice = %destination, + %url, + "Appservice returned invalid response bytes", ); Error::BadServerResponse("Server returned bad response.") }) diff --git a/src/api/client_server.rs b/src/api/client_server.rs new file mode 100644 index 00000000..65c5985d --- /dev/null +++ b/src/api/client_server.rs @@ -0,0 +1,72 @@ +mod account; +mod alias; +mod backup; +mod capabilities; +mod config; +mod context; +mod device; +mod directory; +mod filter; +mod keys; +mod media; +mod membership; +mod message; +mod profile; +mod push; +mod read_marker; +mod redact; +mod relations; +mod report; +mod room; +mod search; +mod session; +mod space; +mod state; +mod sync; +mod tag; +mod thirdparty; +mod threads; +mod to_device; +mod typing; +mod unversioned; +mod user_directory; +mod voip; + +pub(crate) use account::*; +pub(crate) use alias::*; +pub(crate) use backup::*; +pub(crate) use capabilities::*; +pub(crate) use config::*; +pub(crate) use context::*; +pub(crate) use 
device::*; +pub(crate) use directory::*; +pub(crate) use filter::*; +pub(crate) use keys::*; +pub(crate) use media::*; +pub(crate) use membership::*; +pub(crate) use message::*; +pub(crate) use profile::*; +pub(crate) use push::*; +pub(crate) use read_marker::*; +pub(crate) use redact::*; +pub(crate) use relations::*; +pub(crate) use report::*; +pub(crate) use room::*; +pub(crate) use search::*; +pub(crate) use session::*; +pub(crate) use space::*; +pub(crate) use state::*; +pub(crate) use sync::*; +pub(crate) use tag::*; +pub(crate) use thirdparty::*; +pub(crate) use threads::*; +pub(crate) use to_device::*; +pub(crate) use typing::*; +pub(crate) use unversioned::*; +pub(crate) use user_directory::*; +pub(crate) use voip::*; + +pub(crate) const DEVICE_ID_LENGTH: usize = 10; +pub(crate) const TOKEN_LENGTH: usize = 32; +pub(crate) const SESSION_ID_LENGTH: usize = 32; +pub(crate) const AUTO_GEN_PASSWORD_LENGTH: usize = 15; diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 0226abc7..8d729439 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -1,22 +1,25 @@ -use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; -use crate::{api::client_server, services, utils, Error, Result, Ruma}; +use register::RegistrationKind; use ruma::{ api::client::{ account::{ change_password, deactivate, get_3pids, get_username_availability, register::{self, LoginType}, - request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn, - whoami, ThirdPartyIdRemovalStatus, + request_3pid_management_token_via_email, + request_3pid_management_token_via_msisdn, whoami, + ThirdPartyIdRemovalStatus, }, error::ErrorKind, uiaa::{AuthFlow, AuthType, UiaaInfo}, }, - events::{room::message::RoomMessageEventContent, GlobalAccountDataEventType}, - push, UserId, + events::room::message::RoomMessageEventContent, + push, + serde::Raw, + UserId, }; use tracing::{info, warn}; -use register::RegistrationKind; 
+use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; +use crate::{api::client_server, services, utils, Ar, Error, Ra, Result}; const RANDOM_USER_ID_LENGTH: usize = 10; @@ -29,10 +32,11 @@ const RANDOM_USER_ID_LENGTH: usize = 10; /// - The server name of the user id matches this server /// - No user or appservice on this server already claimed this username /// -/// Note: This will not reserve the username, so the username might become invalid when trying to register -pub async fn get_register_available_route( - body: Ruma, -) -> Result { +/// Note: This will not reserve the username, so the username might become +/// invalid when trying to register +pub(crate) async fn get_register_available_route( + body: Ar, +) -> Result> { // Validate user id let user_id = UserId::parse_with_server_name( body.username.to_lowercase(), @@ -40,7 +44,8 @@ pub async fn get_register_available_route( ) .ok() .filter(|user_id| { - !user_id.is_historical() && user_id.server_name() == services().globals.server_name() + !user_id.is_historical() + && user_id.server_name() == services().globals.server_name() }) .ok_or(Error::BadRequest( ErrorKind::InvalidUsername, @@ -58,26 +63,37 @@ pub async fn get_register_available_route( // TODO add check for appservice namespaces // If no if check is true we have an username that's available to be used. - Ok(get_username_availability::v3::Response { available: true }) + Ok(Ra(get_username_availability::v3::Response { + available: true, + })) } /// # `POST /_matrix/client/r0/register` /// /// Register an account on this homeserver. /// -/// You can use [`GET /_matrix/client/r0/register/available`](fn.get_register_available_route.html) +/// You can use [`GET +/// /_matrix/client/r0/register/available`](get_register_available_route) /// to check if the user id is valid and available. 
/// /// - Only works if registration is enabled -/// - If type is guest: ignores all parameters except initial_device_display_name +/// - If type is guest: ignores all parameters except +/// `initial_device_display_name` /// - If sender is not appservice: Requires UIAA (but we only use a dummy stage) -/// - If type is not guest and no username is given: Always fails after UIAA check +/// - If type is not guest and no username is given: Always fails after UIAA +/// check /// - Creates a new account and populates it with default account data -/// - If `inhibit_login` is false: Creates a device and returns device id and access_token -pub async fn register_route(body: Ruma) -> Result { - if !services().globals.allow_registration() && body.appservice_info.is_none() { +/// - If `inhibit_login` is false: Creates a device and returns `device_id` and +/// `access_token` +#[allow(clippy::too_many_lines)] +pub(crate) async fn register_route( + body: Ar, +) -> Result> { + if !services().globals.allow_registration() + && body.appservice_info.is_none() + { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Registration has been disabled.", )); } @@ -120,7 +136,7 @@ pub async fn register_route(body: Ruma) -> Result) -> Result) -> Result) -> Result) -> Result) -> Result, -) -> Result { +pub(crate) async fn change_password_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + let sender_device = + body.sender_device.as_ref().expect("user is authenticated"); let mut uiaainfo = UiaaInfo { flows: vec![AuthFlow { stages: vec![AuthType::Password], }], completed: Vec::new(), - params: Default::default(), + params: Some(Box::default()), session: None, auth_error: None, }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = - services() - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo)?; + let 
(worked, uiaainfo) = services().uiaa.try_auth( + sender_user, + sender_device, + auth, + &uiaainfo, + )?; if !worked { - return Err(Error::Uiaa(uiaainfo)); + return Err(Error::Uiaa(Box::new(uiaainfo))); } // Success! } else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services() - .uiaa - .create(sender_user, sender_device, &uiaainfo, &json)?; - return Err(Error::Uiaa(uiaainfo)); + services().uiaa.create(sender_user, sender_device, &uiaainfo, &json)?; + return Err(Error::Uiaa(Box::new(uiaainfo))); } else { return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } - services() - .users - .set_password(sender_user, Some(&body.new_password))?; + services().users.set_password(sender_user, Some(&body.new_password))?; if body.logout_devices { // Logout all devices except the current one for id in services() .users .all_device_ids(sender_user) - .filter_map(|id| id.ok()) + .filter_map(Result::ok) .filter(|id| id != sender_device) { services().users.remove_device(sender_user, &id)?; } } - info!("User {} changed their password.", sender_user); - services() - .admin - .send_message(RoomMessageEventContent::notice_plain(format!( - "User {sender_user} changed their password." - ))); + info!(user_id = %sender_user, "User changed their password"); + services().admin.send_message(RoomMessageEventContent::notice_plain( + format!("User {sender_user} changed their password."), + )); - Ok(change_password::v3::Response {}) + Ok(Ra(change_password::v3::Response {})) } /// # `GET _matrix/client/r0/account/whoami` /// -/// Get user_id of the sender user. +/// Get `user_id` of the sender user. 
/// /// Note: Also works for Application Services -pub async fn whoami_route(body: Ruma) -> Result { +pub(crate) async fn whoami_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let device_id = body.sender_device.as_ref().cloned(); + let device_id = body.sender_device.clone(); - Ok(whoami::v3::Response { + Ok(Ra(whoami::v3::Response { user_id: sender_user.clone(), device_id, - is_guest: services().users.is_deactivated(sender_user)? && body.appservice_info.is_none(), - }) + is_guest: services().users.is_deactivated(sender_user)? + && body.appservice_info.is_none(), + })) } /// # `POST /_matrix/client/r0/account/deactivate` @@ -395,41 +404,43 @@ pub async fn whoami_route(body: Ruma) -> Result, -) -> Result { +pub(crate) async fn deactivate_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + let sender_device = + body.sender_device.as_ref().expect("user is authenticated"); let mut uiaainfo = UiaaInfo { flows: vec![AuthFlow { stages: vec![AuthType::Password], }], completed: Vec::new(), - params: Default::default(), + params: Some(Box::default()), session: None, auth_error: None, }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = - services() - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo)?; + let (worked, uiaainfo) = services().uiaa.try_auth( + sender_user, + sender_device, + auth, + &uiaainfo, + )?; if !worked { - return Err(Error::Uiaa(uiaainfo)); + return Err(Error::Uiaa(Box::new(uiaainfo))); } // Success! 
} else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services() - .uiaa - .create(sender_user, sender_device, &uiaainfo, &json)?; - return Err(Error::Uiaa(uiaainfo)); + services().uiaa.create(sender_user, sender_device, &uiaainfo, &json)?; + return Err(Error::Uiaa(Box::new(uiaainfo))); } else { return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } @@ -440,16 +451,14 @@ pub async fn deactivate_route( // Remove devices and mark account as deactivated services().users.deactivate_account(sender_user)?; - info!("User {} deactivated their account.", sender_user); - services() - .admin - .send_message(RoomMessageEventContent::notice_plain(format!( - "User {sender_user} deactivated their account." - ))); + info!(user_id = %sender_user, "User deactivated their account"); + services().admin.send_message(RoomMessageEventContent::notice_plain( + format!("User {sender_user} deactivated their account."), + )); - Ok(deactivate::v3::Response { + Ok(Ra(deactivate::v3::Response { id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport, - }) + })) } /// # `GET _matrix/client/v3/account/3pid` @@ -457,22 +466,25 @@ pub async fn deactivate_route( /// Get a list of third party identifiers associated with this account. 
/// /// - Currently always returns empty list -pub async fn third_party_route( - body: Ruma, -) -> Result { - let _sender_user = body.sender_user.as_ref().expect("user is authenticated"); +pub(crate) async fn third_party_route( + body: Ar, +) -> Result> { + let _sender_user = + body.sender_user.as_ref().expect("user is authenticated"); - Ok(get_3pids::v3::Response::new(Vec::new())) + Ok(Ra(get_3pids::v3::Response::new(Vec::new()))) } /// # `POST /_matrix/client/v3/account/3pid/email/requestToken` /// -/// "This API should be used to request validation tokens when adding an email address to an account" +/// "This API should be used to request validation tokens when adding an email +/// address to an account" /// -/// - 403 signals that The homeserver does not allow the third party identifier as a contact option. -pub async fn request_3pid_management_token_via_email_route( - _body: Ruma, -) -> Result { +/// - 403 signals that The homeserver does not allow the third party identifier +/// as a contact option. +pub(crate) async fn request_3pid_management_token_via_email_route( + _body: Ar, +) -> Result> { Err(Error::BadRequest( ErrorKind::ThreepidDenied, "Third party identifier is not allowed", @@ -481,12 +493,14 @@ pub async fn request_3pid_management_token_via_email_route( /// # `POST /_matrix/client/v3/account/3pid/msisdn/requestToken` /// -/// "This API should be used to request validation tokens when adding an phone number to an account" +/// "This API should be used to request validation tokens when adding an phone +/// number to an account" /// -/// - 403 signals that The homeserver does not allow the third party identifier as a contact option. -pub async fn request_3pid_management_token_via_msisdn_route( - _body: Ruma, -) -> Result { +/// - 403 signals that The homeserver does not allow the third party identifier +/// as a contact option. 
+pub(crate) async fn request_3pid_management_token_via_msisdn_route( + _body: Ar, +) -> Result> { Err(Error::BadRequest( ErrorKind::ThreepidDenied, "Third party identifier is not allowed", diff --git a/src/api/client_server/alias.rs b/src/api/client_server/alias.rs index 7cbe9fa1..16052d06 100644 --- a/src/api/client_server/alias.rs +++ b/src/api/client_server/alias.rs @@ -1,8 +1,7 @@ -use crate::{services, Error, Result, Ruma}; use rand::seq::SliceRandom; use ruma::{ api::{ - appservice, + appservice::query::query_room_alias, client::{ alias::{create_alias, delete_alias, get_alias}, error::ErrorKind, @@ -12,12 +11,17 @@ use ruma::{ OwnedRoomAliasId, }; +use crate::{services, Ar, Error, Ra, Result}; + /// # `PUT /_matrix/client/r0/directory/room/{roomAlias}` /// /// Creates a new room alias on this server. -pub async fn create_alias_route( - body: Ruma, -) -> Result { +pub(crate) async fn create_alias_route( + body: Ar, +) -> Result> { + let sender_user = + body.sender_user.as_deref().expect("user is authenticated"); + if body.room_alias.server_name() != services().globals.server_name() { return Err(Error::BadRequest( ErrorKind::InvalidParam, @@ -25,39 +29,31 @@ pub async fn create_alias_route( )); } - if let Some(ref info) = body.appservice_info { + if let Some(info) = &body.appservice_info { if !info.aliases.is_match(body.room_alias.as_str()) { return Err(Error::BadRequest( ErrorKind::Exclusive, "Room alias is not in namespace.", )); } - } else if services() - .appservice - .is_exclusive_alias(&body.room_alias) - .await - { + } else if services().appservice.is_exclusive_alias(&body.room_alias).await { return Err(Error::BadRequest( ErrorKind::Exclusive, "Room alias reserved by appservice.", )); } - if services() - .rooms - .alias - .resolve_local_alias(&body.room_alias)? 
- .is_some() - { + if services().rooms.alias.resolve_local_alias(&body.room_alias)?.is_some() { return Err(Error::Conflict("Alias already exists.")); } - services() - .rooms - .alias - .set_alias(&body.room_alias, &body.room_id)?; + services().rooms.alias.set_alias( + &body.room_alias, + &body.room_id, + sender_user, + )?; - Ok(create_alias::v3::Response::new()) + Ok(Ra(create_alias::v3::Response::new())) } /// # `DELETE /_matrix/client/r0/directory/room/{roomAlias}` @@ -66,9 +62,12 @@ pub async fn create_alias_route( /// /// - TODO: additional access control checks /// - TODO: Update canonical alias event -pub async fn delete_alias_route( - body: Ruma, -) -> Result { +pub(crate) async fn delete_alias_route( + body: Ar, +) -> Result> { + let sender_user = + body.sender_user.as_deref().expect("user is authenticated"); + if body.room_alias.server_name() != services().globals.server_name() { return Err(Error::BadRequest( ErrorKind::InvalidParam, @@ -76,29 +75,25 @@ pub async fn delete_alias_route( )); } - if let Some(ref info) = body.appservice_info { + if let Some(info) = &body.appservice_info { if !info.aliases.is_match(body.room_alias.as_str()) { return Err(Error::BadRequest( ErrorKind::Exclusive, "Room alias is not in namespace.", )); } - } else if services() - .appservice - .is_exclusive_alias(&body.room_alias) - .await - { + } else if services().appservice.is_exclusive_alias(&body.room_alias).await { return Err(Error::BadRequest( ErrorKind::Exclusive, "Room alias reserved by appservice.", )); } - services().rooms.alias.remove_alias(&body.room_alias)?; + services().rooms.alias.remove_alias(&body.room_alias, sender_user)?; // TODO: update alt_aliases? - Ok(delete_alias::v3::Response::new()) + Ok(Ra(delete_alias::v3::Response::new())) } /// # `GET /_matrix/client/r0/directory/room/{roomAlias}` @@ -106,12 +101,14 @@ pub async fn delete_alias_route( /// Resolve an alias locally or over federation. 
/// /// - TODO: Suggest more servers to join via -pub async fn get_alias_route( - body: Ruma, -) -> Result { - get_alias_helper(body.body.room_alias).await +pub(crate) async fn get_alias_route( + body: Ar, +) -> Result> { + get_alias_helper(body.body.room_alias).await.map(Ra) } +// Can't use `services().rooms.alias.resolve_alias` because we also need the set +// of servers from the remote get_room_information request. pub(crate) async fn get_alias_helper( room_alias: OwnedRoomAliasId, ) -> Result { @@ -121,7 +118,7 @@ pub(crate) async fn get_alias_helper( .send_federation_request( room_alias.server_name(), federation::query::get_room_information::v1::Request { - room_alias: room_alias.to_owned(), + room_alias: room_alias.clone(), }, ) .await?; @@ -143,7 +140,7 @@ pub(crate) async fn get_alias_helper( .sending .send_appservice_request( appservice.registration.clone(), - appservice::query::query_room_alias::v1::Request { + query_room_alias::v1::Request { room_alias: room_alias.clone(), }, ) @@ -157,23 +154,23 @@ pub(crate) async fn get_alias_helper( .alias .resolve_local_alias(&room_alias)? .ok_or_else(|| { - Error::bad_config("Appservice lied to us. Room does not exist.") + Error::bad_config( + "Appservice lied to us. 
Room does not \ + exist.", + ) })?, ); break; } } } - }; + } - let room_id = match room_id { - Some(room_id) => room_id, - None => { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Room with alias not found.", - )) - } + let Some(room_id) = room_id else { + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Room with alias not found.", + )); }; Ok(get_alias::v3::Response::new( diff --git a/src/api/client_server/backup.rs b/src/api/client_server/backup.rs index 115cba7c..cc0a0640 100644 --- a/src/api/client_server/backup.rs +++ b/src/api/client_server/backup.rs @@ -1,49 +1,54 @@ -use crate::{services, Error, Result, Ruma}; use ruma::api::client::{ backup::{ add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session, create_backup_version, delete_backup_keys, delete_backup_keys_for_room, - delete_backup_keys_for_session, delete_backup_version, get_backup_info, get_backup_keys, - get_backup_keys_for_room, get_backup_keys_for_session, get_latest_backup_info, - update_backup_version, + delete_backup_keys_for_session, delete_backup_version, get_backup_info, + get_backup_keys, get_backup_keys_for_room, get_backup_keys_for_session, + get_latest_backup_info, update_backup_version, }, error::ErrorKind, }; +use crate::{services, Ar, Error, Ra, Result}; + /// # `POST /_matrix/client/r0/room_keys/version` /// /// Creates a new backup. 
-pub async fn create_backup_version_route( - body: Ruma, -) -> Result { +pub(crate) async fn create_backup_version_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let version = services() - .key_backups - .create_backup(sender_user, &body.algorithm)?; + let version = + services().key_backups.create_backup(sender_user, &body.algorithm)?; - Ok(create_backup_version::v3::Response { version }) + Ok(Ra(create_backup_version::v3::Response { + version, + })) } /// # `PUT /_matrix/client/r0/room_keys/version/{version}` /// -/// Update information about an existing backup. Only `auth_data` can be modified. -pub async fn update_backup_version_route( - body: Ruma, -) -> Result { +/// Update information about an existing backup. Only `auth_data` can be +/// modified. +pub(crate) async fn update_backup_version_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services() - .key_backups - .update_backup(sender_user, &body.version, &body.algorithm)?; + services().key_backups.update_backup( + sender_user, + &body.version, + &body.algorithm, + )?; - Ok(update_backup_version::v3::Response {}) + Ok(Ra(update_backup_version::v3::Response {})) } /// # `GET /_matrix/client/r0/room_keys/version` /// /// Get information about the latest backup version. -pub async fn get_latest_backup_info_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_latest_backup_info_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let (version, algorithm) = services() @@ -54,20 +59,24 @@ pub async fn get_latest_backup_info_route( "Key backup does not exist.", ))?; - Ok(get_latest_backup_info::v3::Response { + Ok(Ra(get_latest_backup_info::v3::Response { algorithm, - count: (services().key_backups.count_keys(sender_user, &version)? 
as u32).into(), + count: services() + .key_backups + .count_keys(sender_user, &version)? + .try_into() + .expect("count should fit in UInt"), etag: services().key_backups.get_etag(sender_user, &version)?, version, - }) + })) } /// # `GET /_matrix/client/r0/room_keys/version` /// /// Get information about an existing backup. -pub async fn get_backup_info_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_backup_info_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let algorithm = services() .key_backups @@ -77,46 +86,45 @@ pub async fn get_backup_info_route( "Key backup does not exist.", ))?; - Ok(get_backup_info::v3::Response { + Ok(Ra(get_backup_info::v3::Response { algorithm, - count: (services() + count: services() .key_backups - .count_keys(sender_user, &body.version)? as u32) - .into(), - etag: services() - .key_backups - .get_etag(sender_user, &body.version)?, - version: body.version.to_owned(), - }) + .count_keys(sender_user, &body.version)? + .try_into() + .expect("count should fit in UInt"), + etag: services().key_backups.get_etag(sender_user, &body.version)?, + version: body.version.clone(), + })) } /// # `DELETE /_matrix/client/r0/room_keys/version/{version}` /// /// Delete an existing key backup. 
/// -/// - Deletes both information about the backup, as well as all key data related to the backup -pub async fn delete_backup_version_route( - body: Ruma, -) -> Result { +/// - Deletes both information about the backup, as well as all key data related +/// to the backup +pub(crate) async fn delete_backup_version_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services() - .key_backups - .delete_backup(sender_user, &body.version)?; + services().key_backups.delete_backup(sender_user, &body.version)?; - Ok(delete_backup_version::v3::Response {}) + Ok(Ra(delete_backup_version::v3::Response {})) } /// # `PUT /_matrix/client/r0/room_keys/keys` /// /// Add the received backup keys to the database. /// -/// - Only manipulating the most recently created version of the backup is allowed +/// - Only manipulating the most recently created version of the backup is +/// allowed /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag -pub async fn add_backup_keys_route( - body: Ruma, -) -> Result { +pub(crate) async fn add_backup_keys_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if Some(&body.version) @@ -127,7 +135,8 @@ pub async fn add_backup_keys_route( { return Err(Error::BadRequest( ErrorKind::InvalidParam, - "You may only manipulate the most recently created version of the backup.", + "You may only manipulate the most recently created version of the \ + backup.", )); } @@ -139,31 +148,31 @@ pub async fn add_backup_keys_route( room_id, session_id, key_data, - )? + )?; } } - Ok(add_backup_keys::v3::Response { - count: (services() + Ok(Ra(add_backup_keys::v3::Response { + count: services() .key_backups - .count_keys(sender_user, &body.version)? as u32) - .into(), - etag: services() - .key_backups - .get_etag(sender_user, &body.version)?, - }) + .count_keys(sender_user, &body.version)? 
+ .try_into() + .expect("count should fit in UInt"), + etag: services().key_backups.get_etag(sender_user, &body.version)?, + })) } /// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}` /// /// Add the received backup keys to the database. /// -/// - Only manipulating the most recently created version of the backup is allowed +/// - Only manipulating the most recently created version of the backup is +/// allowed /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag -pub async fn add_backup_keys_for_room_route( - body: Ruma, -) -> Result { +pub(crate) async fn add_backup_keys_for_room_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if Some(&body.version) @@ -174,7 +183,8 @@ pub async fn add_backup_keys_for_room_route( { return Err(Error::BadRequest( ErrorKind::InvalidParam, - "You may only manipulate the most recently created version of the backup.", + "You may only manipulate the most recently created version of the \ + backup.", )); } @@ -185,30 +195,30 @@ pub async fn add_backup_keys_for_room_route( &body.room_id, session_id, key_data, - )? + )?; } - Ok(add_backup_keys_for_room::v3::Response { - count: (services() + Ok(Ra(add_backup_keys_for_room::v3::Response { + count: services() .key_backups - .count_keys(sender_user, &body.version)? as u32) - .into(), - etag: services() - .key_backups - .get_etag(sender_user, &body.version)?, - }) + .count_keys(sender_user, &body.version)? + .try_into() + .expect("count should fit in UInt"), + etag: services().key_backups.get_etag(sender_user, &body.version)?, + })) } /// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` /// /// Add the received backup key to the database. 
/// -/// - Only manipulating the most recently created version of the backup is allowed +/// - Only manipulating the most recently created version of the backup is +/// allowed /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag -pub async fn add_backup_keys_for_session_route( - body: Ruma, -) -> Result { +pub(crate) async fn add_backup_keys_for_session_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if Some(&body.version) @@ -219,7 +229,8 @@ pub async fn add_backup_keys_for_session_route( { return Err(Error::BadRequest( ErrorKind::InvalidParam, - "You may only manipulate the most recently created version of the backup.", + "You may only manipulate the most recently created version of the \ + backup.", )); } @@ -231,116 +242,126 @@ pub async fn add_backup_keys_for_session_route( &body.session_data, )?; - Ok(add_backup_keys_for_session::v3::Response { - count: (services() + Ok(Ra(add_backup_keys_for_session::v3::Response { + count: services() .key_backups - .count_keys(sender_user, &body.version)? as u32) - .into(), - etag: services() - .key_backups - .get_etag(sender_user, &body.version)?, - }) + .count_keys(sender_user, &body.version)? + .try_into() + .expect("count should fit in UInt"), + etag: services().key_backups.get_etag(sender_user, &body.version)?, + })) } /// # `GET /_matrix/client/r0/room_keys/keys` /// /// Retrieves all keys from the backup. -pub async fn get_backup_keys_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_backup_keys_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let rooms = services().key_backups.get_all(sender_user, &body.version)?; - Ok(get_backup_keys::v3::Response { rooms }) + Ok(Ra(get_backup_keys::v3::Response { + rooms, + })) } /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}` /// /// Retrieves all keys from the backup for a given room. 
-pub async fn get_backup_keys_for_room_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_backup_keys_for_room_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sessions = services() - .key_backups - .get_room(sender_user, &body.version, &body.room_id)?; + let sessions = services().key_backups.get_room( + sender_user, + &body.version, + &body.room_id, + )?; - Ok(get_backup_keys_for_room::v3::Response { sessions }) + Ok(Ra(get_backup_keys_for_room::v3::Response { + sessions, + })) } /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` /// /// Retrieves a key from the backup. -pub async fn get_backup_keys_for_session_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_backup_keys_for_session_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let key_data = services() .key_backups - .get_session(sender_user, &body.version, &body.room_id, &body.session_id)? + .get_session( + sender_user, + &body.version, + &body.room_id, + &body.session_id, + )? .ok_or(Error::BadRequest( ErrorKind::NotFound, "Backup key not found for this user's session.", ))?; - Ok(get_backup_keys_for_session::v3::Response { key_data }) + Ok(Ra(get_backup_keys_for_session::v3::Response { + key_data, + })) } /// # `DELETE /_matrix/client/r0/room_keys/keys` /// /// Delete the keys from the backup. -pub async fn delete_backup_keys_route( - body: Ruma, -) -> Result { +pub(crate) async fn delete_backup_keys_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services() - .key_backups - .delete_all_keys(sender_user, &body.version)?; + services().key_backups.delete_all_keys(sender_user, &body.version)?; - Ok(delete_backup_keys::v3::Response { - count: (services() + Ok(Ra(delete_backup_keys::v3::Response { + count: services() .key_backups - .count_keys(sender_user, &body.version)? 
as u32) - .into(), - etag: services() - .key_backups - .get_etag(sender_user, &body.version)?, - }) + .count_keys(sender_user, &body.version)? + .try_into() + .expect("count should fit in UInt"), + etag: services().key_backups.get_etag(sender_user, &body.version)?, + })) } /// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}` /// /// Delete the keys from the backup for a given room. -pub async fn delete_backup_keys_for_room_route( - body: Ruma, -) -> Result { +pub(crate) async fn delete_backup_keys_for_room_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services() - .key_backups - .delete_room_keys(sender_user, &body.version, &body.room_id)?; + services().key_backups.delete_room_keys( + sender_user, + &body.version, + &body.room_id, + )?; - Ok(delete_backup_keys_for_room::v3::Response { - count: (services() + Ok(Ra(delete_backup_keys_for_room::v3::Response { + count: services() .key_backups - .count_keys(sender_user, &body.version)? as u32) - .into(), - etag: services() - .key_backups - .get_etag(sender_user, &body.version)?, - }) + .count_keys(sender_user, &body.version)? + .try_into() + .expect("count should fit in UInt"), + etag: services().key_backups.get_etag(sender_user, &body.version)?, + })) } /// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` /// /// Delete a key from the backup. -pub async fn delete_backup_keys_for_session_route( - body: Ruma, -) -> Result { +pub(crate) async fn delete_backup_keys_for_session_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); services().key_backups.delete_room_key( @@ -350,13 +371,12 @@ pub async fn delete_backup_keys_for_session_route( &body.session_id, )?; - Ok(delete_backup_keys_for_session::v3::Response { - count: (services() + Ok(Ra(delete_backup_keys_for_session::v3::Response { + count: services() .key_backups - .count_keys(sender_user, &body.version)? 
as u32) - .into(), - etag: services() - .key_backups - .get_etag(sender_user, &body.version)?, - }) + .count_keys(sender_user, &body.version)? + .try_into() + .expect("count should fit in UInt"), + etag: services().key_backups.get_etag(sender_user, &body.version)?, + })) } diff --git a/src/api/client_server/capabilities.rs b/src/api/client_server/capabilities.rs index 233e3c9c..f5542a10 100644 --- a/src/api/client_server/capabilities.rs +++ b/src/api/client_server/capabilities.rs @@ -1,19 +1,20 @@ -use crate::{services, Result, Ruma}; -use ruma::api::client::discovery::get_capabilities::{ - self, Capabilities, RoomVersionStability, RoomVersionsCapability, -}; use std::collections::BTreeMap; +use ruma::api::client::discovery::get_capabilities::{ + self, + v3::{Capabilities, RoomVersionStability, RoomVersionsCapability}, +}; + +use crate::{services, Ar, Ra, Result}; + /// # `GET /_matrix/client/r0/capabilities` /// -/// Get information on the supported feature set and other relevent capabilities of this server. -pub async fn get_capabilities_route( - _body: Ruma, -) -> Result { +/// Get information on the supported feature set and other relevent capabilities +/// of this server. 
+pub(crate) async fn get_capabilities_route( + _body: Ar, +) -> Result> { let mut available = BTreeMap::new(); - for room_version in &services().globals.unstable_room_versions { - available.insert(room_version.clone(), RoomVersionStability::Unstable); - } for room_version in &services().globals.stable_room_versions { available.insert(room_version.clone(), RoomVersionStability::Stable); } @@ -24,5 +25,7 @@ pub async fn get_capabilities_route( available, }; - Ok(get_capabilities::v3::Response { capabilities }) + Ok(Ra(get_capabilities::v3::Response { + capabilities, + })) } diff --git a/src/api/client_server/config.rs b/src/api/client_server/config.rs index 37279e35..8abe8516 100644 --- a/src/api/client_server/config.rs +++ b/src/api/client_server/config.rs @@ -1,116 +1,80 @@ -use crate::{services, Error, Result, Ruma}; -use ruma::{ - api::client::{ - config::{ - get_global_account_data, get_room_account_data, set_global_account_data, - set_room_account_data, - }, - error::ErrorKind, +use ruma::api::client::{ + config::{ + get_global_account_data, get_room_account_data, + set_global_account_data, set_room_account_data, }, - events::{AnyGlobalAccountDataEventContent, AnyRoomAccountDataEventContent}, - serde::Raw, + error::ErrorKind, }; -use serde::Deserialize; -use serde_json::{json, value::RawValue as RawJsonValue}; + +use crate::{services, Ar, Error, Ra, Result}; /// # `PUT /_matrix/client/r0/user/{userId}/account_data/{type}` /// /// Sets some account data for the sender user. 
-pub async fn set_global_account_data_route( - body: Ruma, -) -> Result { +pub(crate) async fn set_global_account_data_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let data: serde_json::Value = serde_json::from_str(body.data.json().get()) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?; - - let event_type = body.event_type.to_string(); - - services().account_data.update( - None, + services().account_data.update_global_any( sender_user, - event_type.clone().into(), - &json!({ - "type": event_type, - "content": data, - }), + &body.event_type, + &body.data, )?; - Ok(set_global_account_data::v3::Response {}) + Ok(Ra(set_global_account_data::v3::Response {})) } /// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}` /// /// Sets some room account data for the sender user. -pub async fn set_room_account_data_route( - body: Ruma, -) -> Result { +pub(crate) async fn set_room_account_data_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let data: serde_json::Value = serde_json::from_str(body.data.json().get()) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?; - - let event_type = body.event_type.to_string(); - - services().account_data.update( - Some(&body.room_id), + services().account_data.update_room_any( + &body.room_id, sender_user, - event_type.clone().into(), - &json!({ - "type": event_type, - "content": data, - }), + &body.event_type, + &body.data, )?; - Ok(set_room_account_data::v3::Response {}) + Ok(Ra(set_room_account_data::v3::Response {})) } /// # `GET /_matrix/client/r0/user/{userId}/account_data/{type}` /// /// Gets some account data for the sender user. 
-pub async fn get_global_account_data_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_global_account_data_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event: Box = services() + let account_data = services() .account_data - .get(None, sender_user, body.event_type.to_string().into())? + .get_global_any(sender_user, &body.event_type)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; - let account_data = serde_json::from_str::(event.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db."))? - .content; - - Ok(get_global_account_data::v3::Response { account_data }) + Ok(Ra(get_global_account_data::v3::Response { + account_data, + })) } /// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}` /// /// Gets some room account data for the sender user. -pub async fn get_room_account_data_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_room_account_data_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event: Box = services() + let account_data = services() .account_data - .get(Some(&body.room_id), sender_user, body.event_type.clone())? + .get_room_any(&body.room_id, sender_user, &body.event_type)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; - let account_data = serde_json::from_str::(event.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db."))? 
- .content; - - Ok(get_room_account_data::v3::Response { account_data }) -} - -#[derive(Deserialize)] -struct ExtractRoomEventContent { - content: Raw, -} - -#[derive(Deserialize)] -struct ExtractGlobalEventContent { - content: Raw, + Ok(Ra(get_room_account_data::v3::Response { + account_data, + })) } diff --git a/src/api/client_server/context.rs b/src/api/client_server/context.rs index 8e193e6b..ba803454 100644 --- a/src/api/client_server/context.rs +++ b/src/api/client_server/context.rs @@ -1,60 +1,58 @@ -use crate::{services, Error, Result, Ruma}; -use ruma::{ - api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions}, - events::StateEventType, -}; use std::collections::HashSet; + +use ruma::{ + api::client::{ + context::get_context, error::ErrorKind, filter::LazyLoadOptions, + }, + events::StateEventType, + uint, +}; use tracing::error; +use crate::{services, Ar, Error, Ra, Result}; + /// # `GET /_matrix/client/r0/rooms/{roomId}/context` /// /// Allows loading room history around an event. 
/// -/// - Only works if the user is joined (TODO: always allow, but only show events if the user was -/// joined, depending on history_visibility) -pub async fn get_context_route( - body: Ruma, -) -> Result { +/// - Only works if the user is joined (TODO: always allow, but only show events +/// if the user was joined, depending on `history_visibility`) +#[allow(clippy::too_many_lines)] +pub(crate) async fn get_context_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + let sender_device = + body.sender_device.as_ref().expect("user is authenticated"); - let (lazy_load_enabled, lazy_load_send_redundant) = match &body.filter.lazy_load_options { - LazyLoadOptions::Enabled { - include_redundant_members, - } => (true, *include_redundant_members), - _ => (false, false), - }; + let (lazy_load_enabled, lazy_load_send_redundant) = + match &body.filter.lazy_load_options { + LazyLoadOptions::Enabled { + include_redundant_members, + } => (true, *include_redundant_members), + LazyLoadOptions::Disabled => (false, false), + }; let mut lazy_loaded = HashSet::new(); - let base_token = services() - .rooms - .timeline - .get_pdu_count(&body.event_id)? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Base event id not found.", - ))?; + let base_token = + services().rooms.timeline.get_pdu_count(&body.event_id)?.ok_or( + Error::BadRequest(ErrorKind::NotFound, "Base event id not found."), + )?; - let base_event = - services() - .rooms - .timeline - .get_pdu(&body.event_id)? 
- .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Base event not found.", - ))?; + let base_event = services().rooms.timeline.get_pdu(&body.event_id)?.ok_or( + Error::BadRequest(ErrorKind::NotFound, "Base event not found."), + )?; let room_id = base_event.room_id.clone(); - if !services() - .rooms - .state_accessor - .user_can_see_event(sender_user, &room_id, &body.event_id)? - { + if !services().rooms.state_accessor.user_can_see_event( + sender_user, + &room_id, + &body.event_id, + )? { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "You don't have permission to view this event.", )); } @@ -70,7 +68,8 @@ pub async fn get_context_route( } // Use limit with maximum 100 - let limit = u64::from(body.limit).min(100) as usize; + let half_limit = usize::try_from(body.limit.min(uint!(100)) / uint!(2)) + .expect("0-50 should fit in usize"); let base_event = base_event.to_room_event(); @@ -78,8 +77,8 @@ pub async fn get_context_route( .rooms .timeline .pdus_until(sender_user, &room_id, base_token)? - .take(limit / 2) - .filter_map(|r| r.ok()) // Remove buggy events + .take(half_limit) + .filter_map(Result::ok) .filter(|(_, pdu)| { services() .rooms @@ -103,20 +102,17 @@ pub async fn get_context_route( let start_token = events_before .last() - .map(|(count, _)| count.stringify()) - .unwrap_or_else(|| base_token.stringify()); + .map_or_else(|| base_token.stringify(), |(count, _)| count.stringify()); - let events_before: Vec<_> = events_before - .into_iter() - .map(|(_, pdu)| pdu.to_room_event()) - .collect(); + let events_before: Vec<_> = + events_before.into_iter().map(|(_, pdu)| pdu.to_room_event()).collect(); let events_after: Vec<_> = services() .rooms .timeline .pdus_after(sender_user, &room_id, base_token)? 
- .take(limit / 2) - .filter_map(|r| r.ok()) // Remove buggy events + .take(half_limit) + .filter_map(Result::ok) .filter(|(_, pdu)| { services() .rooms @@ -138,59 +134,46 @@ pub async fn get_context_route( } } - let shortstatehash = match services().rooms.state_accessor.pdu_shortstatehash( - events_after - .last() - .map_or(&*body.event_id, |(_, e)| &*e.event_id), - )? { - Some(s) => s, - None => services() - .rooms - .state - .get_room_shortstatehash(&room_id)? - .expect("All rooms have state"), - }; + let shortstatehash = + match services().rooms.state_accessor.pdu_shortstatehash( + events_after.last().map_or(&*body.event_id, |(_, e)| &*e.event_id), + )? { + Some(s) => s, + None => services() + .rooms + .state + .get_room_shortstatehash(&room_id)? + .expect("All rooms have state"), + }; - let state_ids = services() - .rooms - .state_accessor - .state_full_ids(shortstatehash) - .await?; + let state_ids = + services().rooms.state_accessor.state_full_ids(shortstatehash).await?; let end_token = events_after .last() - .map(|(count, _)| count.stringify()) - .unwrap_or_else(|| base_token.stringify()); + .map_or_else(|| base_token.stringify(), |(count, _)| count.stringify()); - let events_after: Vec<_> = events_after - .into_iter() - .map(|(_, pdu)| pdu.to_room_event()) - .collect(); + let events_after: Vec<_> = + events_after.into_iter().map(|(_, pdu)| pdu.to_room_event()).collect(); let mut state = Vec::new(); - for (shortstatekey, id) in state_ids { - let (event_type, state_key) = services() - .rooms - .short - .get_statekey_from_short(shortstatekey)?; + for (shortstatekey, event_id) in state_ids { + let (event_type, state_key) = + services().rooms.short.get_statekey_from_short(shortstatekey)?; if event_type != StateEventType::RoomMember { - let pdu = match services().rooms.timeline.get_pdu(&id)? { - Some(pdu) => pdu, - None => { - error!("Pdu in state not found: {}", id); - continue; - } + let Some(pdu) = services().rooms.timeline.get_pdu(&event_id)? 
+ else { + error!(%event_id, "Event in state not found"); + continue; }; state.push(pdu.to_state_event()); } else if !lazy_load_enabled || lazy_loaded.contains(&state_key) { - let pdu = match services().rooms.timeline.get_pdu(&id)? { - Some(pdu) => pdu, - None => { - error!("Pdu in state not found: {}", id); - continue; - } + let Some(pdu) = services().rooms.timeline.get_pdu(&event_id)? + else { + error!(%event_id, "Event in state not found"); + continue; }; state.push(pdu.to_state_event()); } @@ -205,5 +188,5 @@ pub async fn get_context_route( state, }; - Ok(resp) + Ok(Ra(resp)) } diff --git a/src/api/client_server/device.rs b/src/api/client_server/device.rs index aba061b2..28130c00 100644 --- a/src/api/client_server/device.rs +++ b/src/api/client_server/device.rs @@ -1,35 +1,40 @@ -use crate::{services, utils, Error, Result, Ruma}; use ruma::api::client::{ - device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, + device::{ + self, delete_device, delete_devices, get_device, get_devices, + update_device, + }, error::ErrorKind, uiaa::{AuthFlow, AuthType, UiaaInfo}, }; use super::SESSION_ID_LENGTH; +use crate::{services, utils, Ar, Error, Ra, Result}; /// # `GET /_matrix/client/r0/devices` /// /// Get metadata on all devices of the sender user. -pub async fn get_devices_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_devices_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let devices: Vec = services() .users .all_devices_metadata(sender_user) - .filter_map(|r| r.ok()) // Filter out buggy devices + .filter_map(Result::ok) .collect(); - Ok(get_devices::v3::Response { devices }) + Ok(Ra(get_devices::v3::Response { + devices, + })) } /// # `GET /_matrix/client/r0/devices/{deviceId}` /// /// Get metadata on a single device of the sender user. 
-pub async fn get_device_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_device_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let device = services() @@ -37,15 +42,17 @@ pub async fn get_device_route( .get_device_metadata(sender_user, &body.body.device_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; - Ok(get_device::v3::Response { device }) + Ok(Ra(get_device::v3::Response { + device, + })) } /// # `PUT /_matrix/client/r0/devices/{deviceId}` /// /// Updates the metadata on a given device of the sender user. -pub async fn update_device_route( - body: Ruma, -) -> Result { +pub(crate) async fn update_device_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut device = services() @@ -53,13 +60,15 @@ pub async fn update_device_route( .get_device_metadata(sender_user, &body.device_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; - device.display_name = body.display_name.clone(); + device.display_name.clone_from(&body.display_name); - services() - .users - .update_device_metadata(sender_user, &body.device_id, &device)?; + services().users.update_device_metadata( + sender_user, + &body.device_id, + &device, + )?; - Ok(update_device::v3::Response {}) + Ok(Ra(update_device::v3::Response {})) } /// # `DELETE /_matrix/client/r0/devices/{deviceId}` @@ -68,14 +77,16 @@ pub async fn update_device_route( /// /// - Requires UIAA to verify user password /// - Invalidates access token -/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts) +/// - Deletes device metadata (device id, device display name, last seen ip, +/// last seen ts) /// - Forgets to-device events /// - Triggers device list updates -pub async fn delete_device_route( - body: Ruma, -) -> Result { +pub(crate) async fn delete_device_route( + body: Ar, +) -> Result> { let sender_user = 
body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + let sender_device = + body.sender_device.as_ref().expect("user is authenticated"); // UIAA let mut uiaainfo = UiaaInfo { @@ -83,35 +94,33 @@ pub async fn delete_device_route( stages: vec![AuthType::Password], }], completed: Vec::new(), - params: Default::default(), + params: Some(Box::default()), session: None, auth_error: None, }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = - services() - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo)?; + let (worked, uiaainfo) = services().uiaa.try_auth( + sender_user, + sender_device, + auth, + &uiaainfo, + )?; if !worked { - return Err(Error::Uiaa(uiaainfo)); + return Err(Error::Uiaa(Box::new(uiaainfo))); } // Success! } else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services() - .uiaa - .create(sender_user, sender_device, &uiaainfo, &json)?; - return Err(Error::Uiaa(uiaainfo)); + services().uiaa.create(sender_user, sender_device, &uiaainfo, &json)?; + return Err(Error::Uiaa(Box::new(uiaainfo))); } else { return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } - services() - .users - .remove_device(sender_user, &body.device_id)?; + services().users.remove_device(sender_user, &body.device_id)?; - Ok(delete_device::v3::Response {}) + Ok(Ra(delete_device::v3::Response {})) } /// # `PUT /_matrix/client/r0/devices/{deviceId}` @@ -122,14 +131,16 @@ pub async fn delete_device_route( /// /// For each device: /// - Invalidates access token -/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts) +/// - Deletes device metadata (device id, device display name, last seen ip, +/// last seen ts) /// - Forgets to-device events /// - Triggers device list updates -pub async fn delete_devices_route( - body: Ruma, -) -> Result { +pub(crate) async fn 
delete_devices_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + let sender_device = + body.sender_device.as_ref().expect("user is authenticated"); // UIAA let mut uiaainfo = UiaaInfo { @@ -137,33 +148,33 @@ pub async fn delete_devices_route( stages: vec![AuthType::Password], }], completed: Vec::new(), - params: Default::default(), + params: Some(Box::default()), session: None, auth_error: None, }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = - services() - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo)?; + let (worked, uiaainfo) = services().uiaa.try_auth( + sender_user, + sender_device, + auth, + &uiaainfo, + )?; if !worked { - return Err(Error::Uiaa(uiaainfo)); + return Err(Error::Uiaa(Box::new(uiaainfo))); } // Success! } else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services() - .uiaa - .create(sender_user, sender_device, &uiaainfo, &json)?; - return Err(Error::Uiaa(uiaainfo)); + services().uiaa.create(sender_user, sender_device, &uiaainfo, &json)?; + return Err(Error::Uiaa(Box::new(uiaainfo))); } else { return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } for device_id in &body.devices { - services().users.remove_device(sender_user, device_id)? 
+ services().users.remove_device(sender_user, device_id)?; } - Ok(delete_devices::v3::Response {}) + Ok(Ra(delete_devices::v3::Response {})) } diff --git a/src/api/client_server/directory.rs b/src/api/client_server/directory.rs index 50ae9f15..ed95e034 100644 --- a/src/api/client_server/directory.rs +++ b/src/api/client_server/directory.rs @@ -1,10 +1,9 @@ -use crate::{services, Error, Result, Ruma}; use ruma::{ api::{ client::{ directory::{ - get_public_rooms, get_public_rooms_filtered, get_room_visibility, - set_room_visibility, + get_public_rooms, get_public_rooms_filtered, + get_room_visibility, set_room_visibility, }, error::ErrorKind, room, @@ -16,26 +15,31 @@ use ruma::{ room::{ avatar::RoomAvatarEventContent, canonical_alias::RoomCanonicalAliasEventContent, - create::RoomCreateEventContent, guest_access::{GuestAccess, RoomGuestAccessEventContent}, - history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + history_visibility::{ + HistoryVisibility, RoomHistoryVisibilityEventContent, + }, join_rules::{JoinRule, RoomJoinRulesEventContent}, topic::RoomTopicEventContent, }, StateEventType, }, - ServerName, UInt, + uint, ServerName, UInt, }; use tracing::{error, info, warn}; +use crate::{ + service::rooms::state::ExtractType, services, Ar, Error, Ra, Result, +}; + /// # `POST /_matrix/client/r0/publicRooms` /// /// Lists the public rooms on this server. /// /// - Rooms are ordered by the number of joined members -pub async fn get_public_rooms_filtered_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_public_rooms_filtered_route( + body: Ar, +) -> Result> { get_public_rooms_filtered_helper( body.server.as_deref(), body.limit, @@ -44,6 +48,7 @@ pub async fn get_public_rooms_filtered_route( &body.room_network, ) .await + .map(Ra) } /// # `GET /_matrix/client/r0/publicRooms` @@ -51,9 +56,9 @@ pub async fn get_public_rooms_filtered_route( /// Lists the public rooms on this server. 
/// /// - Rooms are ordered by the number of joined members -pub async fn get_public_rooms_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_public_rooms_route( + body: Ar, +) -> Result> { let response = get_public_rooms_filtered_helper( body.server.as_deref(), body.limit, @@ -63,12 +68,12 @@ pub async fn get_public_rooms_route( ) .await?; - Ok(get_public_rooms::v3::Response { + Ok(Ra(get_public_rooms::v3::Response { chunk: response.chunk, prev_batch: response.prev_batch, next_batch: response.next_batch, total_room_count_estimate: response.total_room_count_estimate, - }) + })) } /// # `PUT /_matrix/client/r0/directory/list/room/{roomId}` @@ -76,9 +81,9 @@ pub async fn get_public_rooms_route( /// Sets the visibility of a given room in the room directory. /// /// - TODO: Access control checks -pub async fn set_room_visibility_route( - body: Ruma, -) -> Result { +pub(crate) async fn set_room_visibility_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if !services().rooms.metadata.exists(&body.room_id)? { @@ -89,9 +94,15 @@ pub async fn set_room_visibility_route( match &body.visibility { room::Visibility::Public => { services().rooms.directory.set_public(&body.room_id)?; - info!("{} made {} public", sender_user, body.room_id); + info!( + user_id = %sender_user, + room_id = %body.room_id, + "User made room public", + ); + } + room::Visibility::Private => { + services().rooms.directory.set_not_public(&body.room_id)?; } - room::Visibility::Private => services().rooms.directory.set_not_public(&body.room_id)?, _ => { return Err(Error::BadRequest( ErrorKind::InvalidParam, @@ -100,29 +111,34 @@ pub async fn set_room_visibility_route( } } - Ok(set_room_visibility::v3::Response {}) + Ok(Ra(set_room_visibility::v3::Response {})) } /// # `GET /_matrix/client/r0/directory/list/room/{roomId}` /// /// Gets the visibility of a given room in the room directory. 
-pub async fn get_room_visibility_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_room_visibility_route( + body: Ar, +) -> Result> { if !services().rooms.metadata.exists(&body.room_id)? { // Return 404 if the room doesn't exist return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found")); } - Ok(get_room_visibility::v3::Response { - visibility: if services().rooms.directory.is_public_room(&body.room_id)? { + Ok(Ra(get_room_visibility::v3::Response { + visibility: if services() + .rooms + .directory + .is_public_room(&body.room_id)? + { room::Visibility::Public } else { room::Visibility::Private }, - }) + })) } +#[allow(clippy::too_many_lines)] pub(crate) async fn get_public_rooms_filtered_helper( server: Option<&ServerName>, limit: Option, @@ -130,8 +146,8 @@ pub(crate) async fn get_public_rooms_filtered_helper( filter: &Filter, _network: &RoomNetwork, ) -> Result { - if let Some(other_server) = - server.filter(|server| *server != services().globals.server_name().as_str()) + if let Some(other_server) = server + .filter(|server| *server != services().globals.server_name().as_str()) { let response = services() .sending @@ -157,8 +173,8 @@ pub(crate) async fn get_public_rooms_filtered_helper( }); } - let limit = limit.map_or(10, u64::from); - let mut num_since = 0_u64; + let limit = limit.unwrap_or(uint!(10)); + let mut num_since = UInt::MIN; if let Some(s) = &since { let mut characters = s.chars(); @@ -173,10 +189,9 @@ pub(crate) async fn get_public_rooms_filtered_helper( } }; - num_since = characters - .collect::() - .parse() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token."))?; + num_since = characters.collect::().parse().map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token.") + })?; if backwards { num_since = num_since.saturating_sub(limit); @@ -187,129 +202,12 @@ pub(crate) async fn get_public_rooms_filtered_helper( .rooms .directory .public_rooms() - .map(|room_id| { - let room_id 
= room_id?; - - let chunk = PublicRoomsChunk { - canonical_alias: services() - .rooms - .state_accessor - .room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")? - .map_or(Ok(None), |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomCanonicalAliasEventContent| c.alias) - .map_err(|_| { - Error::bad_database("Invalid canonical alias event in database.") - }) - })?, - name: services().rooms.state_accessor.get_name(&room_id)?, - num_joined_members: services() - .rooms - .state_cache - .room_joined_count(&room_id)? - .unwrap_or_else(|| { - warn!("Room {} has no member count", room_id); - 0 - }) - .try_into() - .expect("user count should not be that big"), - topic: services() - .rooms - .state_accessor - .room_state_get(&room_id, &StateEventType::RoomTopic, "")? - .map_or(Ok(None), |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomTopicEventContent| Some(c.topic)) - .map_err(|_| { - error!("Invalid room topic event in database for room {}", room_id); - Error::bad_database("Invalid room topic event in database.") - }) - })?, - world_readable: services() - .rooms - .state_accessor - .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")? - .map_or(Ok(false), |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomHistoryVisibilityEventContent| { - c.history_visibility == HistoryVisibility::WorldReadable - }) - .map_err(|_| { - Error::bad_database( - "Invalid room history visibility event in database.", - ) - }) - })?, - guest_can_join: services() - .rooms - .state_accessor - .room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")? - .map_or(Ok(false), |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomGuestAccessEventContent| { - c.guest_access == GuestAccess::CanJoin - }) - .map_err(|_| { - Error::bad_database("Invalid room guest access event in database.") - }) - })?, - avatar_url: services() - .rooms - .state_accessor - .room_state_get(&room_id, &StateEventType::RoomAvatar, "")? 
- .map(|s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomAvatarEventContent| c.url) - .map_err(|_| { - Error::bad_database("Invalid room avatar event in database.") - }) - }) - .transpose()? - // url is now an Option so we must flatten - .flatten(), - join_rule: services() - .rooms - .state_accessor - .room_state_get(&room_id, &StateEventType::RoomJoinRules, "")? - .map(|s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomJoinRulesEventContent| match c.join_rule { - JoinRule::Public => Some(PublicRoomJoinRule::Public), - JoinRule::Knock => Some(PublicRoomJoinRule::Knock), - _ => None, - }) - .map_err(|e| { - error!("Invalid room join rule event in database: {}", e); - Error::BadDatabase("Invalid room join rule event in database.") - }) - }) - .transpose()? - .flatten() - .ok_or_else(|| Error::bad_database("Missing room join rule event for room."))?, - room_type: services() - .rooms - .state_accessor - .room_state_get(&room_id, &StateEventType::RoomCreate, "")? - .map(|s| { - serde_json::from_str::(s.content.get()).map_err( - |e| { - error!("Invalid room create event in database: {}", e); - Error::BadDatabase("Invalid room create event in database.") - }, - ) - }) - .transpose()? 
- .and_then(|e| e.room_type), - room_id, - }; - Ok(chunk) - }) - .filter_map(|r: Result<_>| r.ok()) // Filter out buggy rooms + .filter_map(Result::ok) + .map(room_id_to_chunk) + .filter_map(Result::ok) .filter(|chunk| { - if let Some(query) = filter - .generic_search_term - .as_ref() - .map(|q| q.to_lowercase()) + if let Some(query) = + filter.generic_search_term.as_ref().map(|q| q.to_lowercase()) { if let Some(name) = &chunk.name { if name.as_str().to_lowercase().contains(&query) { @@ -324,7 +222,8 @@ pub(crate) async fn get_public_rooms_filtered_helper( } if let Some(canonical_alias) = &chunk.canonical_alias { - if canonical_alias.as_str().to_lowercase().contains(&query) { + if canonical_alias.as_str().to_lowercase().contains(&query) + { return true; } } @@ -335,30 +234,31 @@ pub(crate) async fn get_public_rooms_filtered_helper( true } }) - // We need to collect all, so we can sort by member count .collect(); all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members)); - let total_room_count_estimate = (all_rooms.len() as u32).into(); + let total_room_count_estimate = + all_rooms.len().try_into().unwrap_or(UInt::MAX); let chunk: Vec<_> = all_rooms .into_iter() - .skip(num_since as usize) - .take(limit as usize) + .skip(num_since.try_into().expect("UInt should fit in usize")) + .take(limit.try_into().expect("UInt should fit in usize")) .collect(); - let prev_batch = if num_since == 0 { + let prev_batch = if num_since == uint!(0) { None } else { Some(format!("p{num_since}")) }; - let next_batch = if chunk.len() < limit as usize { - None - } else { - Some(format!("n{}", num_since + limit)) - }; + let next_batch = + if chunk.len() < limit.try_into().expect("UInt should fit in usize") { + None + } else { + Some(format!("n{}", num_since + limit)) + }; Ok(get_public_rooms_filtered::v3::Response { chunk, @@ -367,3 +267,135 @@ pub(crate) async fn get_public_rooms_filtered_helper( total_room_count_estimate: Some(total_room_count_estimate), }) } + 
+#[allow(clippy::too_many_lines)] +#[tracing::instrument] +fn room_id_to_chunk(room_id: ruma::OwnedRoomId) -> Result { + let canonical_alias = services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")? + .map_or(Ok(None), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomCanonicalAliasEventContent| c.alias) + .map_err(|_| { + Error::bad_database( + "Invalid canonical alias event in database.", + ) + }) + })?; + + let name = services().rooms.state_accessor.get_name(&room_id)?; + + let num_joined_members = services() + .rooms + .state_cache + .room_joined_count(&room_id)? + .unwrap_or_else(|| { + warn!("Room has no member count"); + 0 + }) + .try_into() + .expect("user count should not be that big"); + + let topic = services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomTopic, "")? + .map_or(Ok(None), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomTopicEventContent| Some(c.topic)) + .map_err(|_| { + error!("Invalid room topic event in database for room",); + Error::bad_database("Invalid room topic event in database.") + }) + })?; + + let world_readable = services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")? + .map_or(Ok(false), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomHistoryVisibilityEventContent| { + c.history_visibility == HistoryVisibility::WorldReadable + }) + .map_err(|_| { + Error::bad_database( + "Invalid room history visibility event in database.", + ) + }) + })?; + + let guest_can_join = services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")? 
+ .map_or(Ok(false), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomGuestAccessEventContent| { + c.guest_access == GuestAccess::CanJoin + }) + .map_err(|_| { + Error::bad_database( + "Invalid room guest access event in database.", + ) + }) + })?; + + let avatar_url = services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomAvatar, "")? + .map(|s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomAvatarEventContent| c.url) + .map_err(|_| { + Error::bad_database( + "Invalid room avatar event in database.", + ) + }) + }) + .transpose()? + .flatten(); + + let join_rule = services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomJoinRules, "")? + .map(|s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomJoinRulesEventContent| match c.join_rule { + JoinRule::Public => Some(PublicRoomJoinRule::Public), + JoinRule::Knock => Some(PublicRoomJoinRule::Knock), + _ => None, + }) + .map_err(|error| { + error!(%error, "Invalid room join rule event in database"); + Error::BadDatabase( + "Invalid room join rule event in database.", + ) + }) + }) + .transpose()? 
+ .flatten() + .ok_or_else(|| { + Error::bad_database("Missing room join rule event for room.") + })?; + + let room_type = + services().rooms.state.get_create_content::(&room_id)?; + + Ok(PublicRoomsChunk { + canonical_alias, + name, + num_joined_members, + room_id, + topic, + world_readable, + guest_can_join, + avatar_url, + join_rule, + room_type, + }) +} diff --git a/src/api/client_server/filter.rs b/src/api/client_server/filter.rs index e9a359d6..e99f8b17 100644 --- a/src/api/client_server/filter.rs +++ b/src/api/client_server/filter.rs @@ -1,34 +1,39 @@ -use crate::{services, Error, Result, Ruma}; use ruma::api::client::{ error::ErrorKind, filter::{create_filter, get_filter}, }; +use crate::{services, Ar, Error, Ra, Result}; + /// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}` /// /// Loads a filter that was previously created. /// /// - A user can only access their own filters -pub async fn get_filter_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_filter_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let filter = match services().users.get_filter(sender_user, &body.filter_id)? { - Some(filter) => filter, - None => return Err(Error::BadRequest(ErrorKind::NotFound, "Filter not found.")), + let Some(filter) = + services().users.get_filter(sender_user, &body.filter_id)? + else { + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Filter not found.", + )); }; - Ok(get_filter::v3::Response::new(filter)) + Ok(Ra(get_filter::v3::Response::new(filter))) } /// # `PUT /_matrix/client/r0/user/{userId}/filter` /// /// Creates a new filter to be used by other endpoints. 
-pub async fn create_filter_route( - body: Ruma, -) -> Result { +pub(crate) async fn create_filter_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - Ok(create_filter::v3::Response::new( + Ok(Ra(create_filter::v3::Response::new( services().users.create_filter(sender_user, &body.filter)?, - )) + ))) } diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index 4af8890d..5437a7da 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -1,44 +1,52 @@ -use super::SESSION_ID_LENGTH; -use crate::{services, utils, Error, Result, Ruma}; +use std::{ + collections::{hash_map, BTreeMap, HashMap, HashSet}, + time::{Duration, Instant}, +}; + use futures_util::{stream::FuturesUnordered, StreamExt}; use ruma::{ api::{ client::{ error::ErrorKind, keys::{ - claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures, - upload_signing_keys, + claim_keys, get_key_changes, get_keys, upload_keys, + upload_signatures, upload_signing_keys, }, uiaa::{AuthFlow, AuthType, UiaaInfo}, }, federation, }, serde::Raw, - DeviceKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, + OneTimeKeyAlgorithm, OwnedDeviceId, OwnedServerName, OwnedUserId, + ServerName, UserId, }; use serde_json::json; -use std::{ - collections::{hash_map, BTreeMap, HashMap, HashSet}, - time::{Duration, Instant}, -}; use tracing::debug; +use super::SESSION_ID_LENGTH; +use crate::{services, utils, Ar, Error, Ra, Result}; + /// # `POST /_matrix/client/r0/keys/upload` /// /// Publish end-to-end encryption keys for the sender device. /// /// - Adds one time keys -/// - If there are no device keys yet: Adds device keys (TODO: merge with existing keys?) -pub async fn upload_keys_route( - body: Ruma, -) -> Result { +/// - If there are no device keys yet: Adds device keys (TODO: merge with +/// existing keys?) 
+pub(crate) async fn upload_keys_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + let sender_device = + body.sender_device.as_ref().expect("user is authenticated"); for (key_key, key_value) in &body.one_time_keys { - services() - .users - .add_one_time_key(sender_user, sender_device, key_key, key_value)?; + services().users.add_one_time_key( + sender_user, + sender_device, + key_key, + key_value, + )?; } if let Some(device_keys) = &body.device_keys { @@ -49,17 +57,19 @@ pub async fn upload_keys_route( .get_device_keys(sender_user, sender_device)? .is_none() { - services() - .users - .add_device_keys(sender_user, sender_device, device_keys)?; + services().users.add_device_keys( + sender_user, + sender_device, + device_keys, + )?; } } - Ok(upload_keys::v3::Response { + Ok(Ra(upload_keys::v3::Response { one_time_key_counts: services() .users .count_one_time_keys(sender_user, sender_device)?, - }) + })) } /// # `POST /_matrix/client/r0/keys/query` @@ -68,25 +78,30 @@ pub async fn upload_keys_route( /// /// - Always fetches users from other servers over federation /// - Gets master keys, self-signing keys, user signing keys and device keys. 
-/// - The master and self-signing keys contain signatures that the user is allowed to see -pub async fn get_keys_route(body: Ruma) -> Result { +/// - The master and self-signing keys contain signatures that the user is +/// allowed to see +pub(crate) async fn get_keys_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let response = - get_keys_helper(Some(sender_user), &body.device_keys, |u| u == sender_user).await?; + let response = get_keys_helper(Some(sender_user), &body.device_keys, |u| { + u == sender_user + }) + .await?; - Ok(response) + Ok(Ra(response)) } /// # `POST /_matrix/client/r0/keys/claim` /// /// Claims one-time keys -pub async fn claim_keys_route( - body: Ruma, -) -> Result { +pub(crate) async fn claim_keys_route( + body: Ar, +) -> Result> { let response = claim_keys_helper(&body.one_time_keys).await?; - Ok(response) + Ok(Ra(response)) } /// # `POST /_matrix/client/r0/keys/device_signing/upload` @@ -94,11 +109,12 @@ pub async fn claim_keys_route( /// Uploads end-to-end key information for the sender user. 
/// /// - Requires UIAA to verify password -pub async fn upload_signing_keys_route( - body: Ruma, -) -> Result { +pub(crate) async fn upload_signing_keys_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + let sender_device = + body.sender_device.as_ref().expect("user is authenticated"); // UIAA let mut uiaainfo = UiaaInfo { @@ -106,26 +122,26 @@ pub async fn upload_signing_keys_route( stages: vec![AuthType::Password], }], completed: Vec::new(), - params: Default::default(), + params: Some(Box::default()), session: None, auth_error: None, }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = - services() - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo)?; + let (worked, uiaainfo) = services().uiaa.try_auth( + sender_user, + sender_device, + auth, + &uiaainfo, + )?; if !worked { - return Err(Error::Uiaa(uiaainfo)); + return Err(Error::Uiaa(Box::new(uiaainfo))); } // Success! 
} else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services() - .uiaa - .create(sender_user, sender_device, &uiaainfo, &json)?; - return Err(Error::Uiaa(uiaainfo)); + services().uiaa.create(sender_user, sender_device, &uiaainfo, &json)?; + return Err(Error::Uiaa(Box::new(uiaainfo))); } else { return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } @@ -134,27 +150,29 @@ pub async fn upload_signing_keys_route( services().users.add_cross_signing_keys( sender_user, master_key, - &body.self_signing_key, - &body.user_signing_key, - true, // notify so that other users see the new keys + body.self_signing_key.as_ref(), + body.user_signing_key.as_ref(), + // notify so that other users see the new keys + true, )?; } - Ok(upload_signing_keys::v3::Response {}) + Ok(Ra(upload_signing_keys::v3::Response {})) } /// # `POST /_matrix/client/r0/keys/signatures/upload` /// /// Uploads end-to-end key signatures from the sender user. -pub async fn upload_signatures_route( - body: Ruma, -) -> Result { +pub(crate) async fn upload_signatures_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); for (user_id, keys) in &body.signed_keys { for (key_id, key) in keys { - let key = serde_json::to_value(key) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid key JSON"))?; + let key = serde_json::to_value(key).map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Invalid key JSON") + })?; for signature in key .get("signatures") @@ -173,7 +191,6 @@ pub async fn upload_signatures_route( "Invalid signature.", ))? .clone() - .into_iter() { // Signature validation? let signature = ( @@ -187,26 +204,31 @@ pub async fn upload_signatures_route( ))? 
.to_owned(), ); - services() - .users - .sign_key(user_id, key_id, signature, sender_user)?; + services().users.sign_key( + user_id, + key_id, + signature, + sender_user, + )?; } } } - Ok(upload_signatures::v3::Response { - failures: BTreeMap::new(), // TODO: integrate - }) + Ok(Ra(upload_signatures::v3::Response { + // TODO: integrate + failures: BTreeMap::new(), + })) } /// # `POST /_matrix/client/r0/keys/changes` /// -/// Gets a list of users who have updated their device identity keys since the previous sync token. +/// Gets a list of users who have updated their device identity keys since the +/// previous sync token. /// /// - TODO: left users -pub async fn get_key_changes_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_key_changes_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut device_list_updates = HashSet::new(); @@ -216,23 +238,24 @@ pub async fn get_key_changes_route( .users .keys_changed( sender_user.as_str(), - body.from - .parse() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`."))?, - Some( - body.to - .parse() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`."))?, - ), + body.from.parse().map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid `from`.", + ) + })?, + Some(body.to.parse().map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`.") + })?), ) - .filter_map(|r| r.ok()), + .filter_map(Result::ok), ); for room_id in services() .rooms .state_cache .rooms_joined(sender_user) - .filter_map(|r| r.ok()) + .filter_map(Result::ok) { device_list_updates.extend( services() @@ -240,21 +263,29 @@ pub async fn get_key_changes_route( .keys_changed( room_id.as_ref(), body.from.parse().map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`.") + Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid `from`.", + ) })?, Some(body.to.parse().map_err(|_| { - 
Error::BadRequest(ErrorKind::InvalidParam, "Invalid `to`.") + Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid `to`.", + ) })?), ) - .filter_map(|r| r.ok()), + .filter_map(Result::ok), ); } - Ok(get_key_changes::v3::Response { + Ok(Ra(get_key_changes::v3::Response { changed: device_list_updates.into_iter().collect(), - left: Vec::new(), // TODO - }) + // TODO + left: Vec::new(), + })) } +#[allow(clippy::too_many_lines)] pub(crate) async fn get_keys_helper bool>( sender_user: Option<&UserId>, device_keys_input: &BTreeMap>, @@ -282,16 +313,24 @@ pub(crate) async fn get_keys_helper bool>( let mut container = BTreeMap::new(); for device_id in services().users.all_device_ids(user_id) { let device_id = device_id?; - if let Some(mut keys) = services().users.get_device_keys(user_id, &device_id)? { + if let Some(mut keys) = + services().users.get_device_keys(user_id, &device_id)? + { let metadata = services() .users .get_device_metadata(user_id, &device_id)? .ok_or_else(|| { - Error::bad_database("all_device_keys contained nonexistent device.") + Error::bad_database( + "all_device_keys contained nonexistent device.", + ) })?; add_unsigned_device_display_name(&mut keys, metadata) - .map_err(|_| Error::bad_database("invalid device keys in database"))?; + .map_err(|_| { + Error::bad_database( + "invalid device keys in database", + ) + })?; container.insert(device_id, keys); } } @@ -299,7 +338,9 @@ pub(crate) async fn get_keys_helper bool>( } else { for device_id in device_ids { let mut container = BTreeMap::new(); - if let Some(mut keys) = services().users.get_device_keys(user_id, device_id)? { + if let Some(mut keys) = + services().users.get_device_keys(user_id, device_id)? + { let metadata = services() .users .get_device_metadata(user_id, device_id)? 
@@ -309,29 +350,35 @@ pub(crate) async fn get_keys_helper bool>( ))?; add_unsigned_device_display_name(&mut keys, metadata) - .map_err(|_| Error::bad_database("invalid device keys in database"))?; + .map_err(|_| { + Error::bad_database( + "invalid device keys in database", + ) + })?; container.insert(device_id.to_owned(), keys); } device_keys.insert(user_id.to_owned(), container); } } - if let Some(master_key) = - services() - .users - .get_master_key(sender_user, user_id, &allowed_signatures)? - { + if let Some(master_key) = services().users.get_master_key( + sender_user, + user_id, + &allowed_signatures, + )? { master_keys.insert(user_id.to_owned(), master_key); } - if let Some(self_signing_key) = - services() - .users - .get_self_signing_key(sender_user, user_id, &allowed_signatures)? - { + if let Some(self_signing_key) = services().users.get_self_signing_key( + sender_user, + user_id, + &allowed_signatures, + )? { self_signing_keys.insert(user_id.to_owned(), self_signing_key); } if Some(user_id) == sender_user { - if let Some(user_signing_key) = services().users.get_user_signing_key(user_id)? { + if let Some(user_signing_key) = + services().users.get_user_signing_key(user_id)? 
+ { user_signing_keys.insert(user_id.to_owned(), user_signing_key); } } @@ -339,123 +386,167 @@ pub(crate) async fn get_keys_helper bool>( let mut failures = BTreeMap::new(); - let back_off = |id| async { - match services() - .globals - .bad_query_ratelimiter - .write() - .await - .entry(id) - { - hash_map::Entry::Vacant(e) => { - e.insert((Instant::now(), 1)); - } - hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), - } - }; - let mut futures: FuturesUnordered<_> = get_over_federation .into_iter() - .map(|(server, vec)| async move { - if let Some((time, tries)) = services() - .globals - .bad_query_ratelimiter - .read() - .await - .get(server) - { - // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } - - if time.elapsed() < min_elapsed_duration { - debug!("Backing off query from {:?}", server); - return ( - server, - Err(Error::BadServerResponse("bad query, still backing off")), - ); - } - } - - let mut device_keys_input_fed = BTreeMap::new(); - for (user_id, keys) in vec { - device_keys_input_fed.insert(user_id.to_owned(), keys.clone()); - } - ( - server, - tokio::time::timeout( - Duration::from_secs(25), - services().sending.send_federation_request( - server, - federation::keys::get_keys::v1::Request { - device_keys: device_keys_input_fed, - }, - ), - ) - .await - .map_err(|_e| Error::BadServerResponse("Query took too long")), - ) + .map(|(server, keys)| async move { + (server, request_keys_from(server, keys).await) }) .collect(); while let Some((server, response)) = futures.next().await { - match response { - Ok(Ok(response)) => { - for (user, masterkey) in response.master_keys { - let (master_key_id, mut master_key) = - services().users.parse_master_key(&user, &masterkey)?; + let Ok(response) = response else { + failures.insert(server.to_string(), 
json!({})); + continue; + }; - if let Some(our_master_key) = services().users.get_key( - &master_key_id, - sender_user, - &user, - &allowed_signatures, - )? { - let (_, our_master_key) = - services().users.parse_master_key(&user, &our_master_key)?; - master_key.signatures.extend(our_master_key.signatures); + for (user, masterkey) in response.master_keys { + let (master_key_id, mut master_key) = + services().users.parse_master_key(&user, &masterkey)?; + + if let Some(our_master_key) = services().users.get_key( + &master_key_id, + sender_user, + &user, + &allowed_signatures, + )? { + let (_, our_master_key) = services() + .users + .parse_master_key(&user, &our_master_key)?; + + for (entity, v) in &*our_master_key.signatures { + for (key_identifier, value) in v { + master_key.signatures.insert_signature( + entity.clone(), + key_identifier.clone(), + value.clone(), + ); } - let json = serde_json::to_value(master_key).expect("to_value always works"); - let raw = serde_json::from_value(json).expect("Raw::from_value always works"); - services().users.add_cross_signing_keys( - &user, &raw, &None, &None, - false, // Dont notify. A notification would trigger another key request resulting in an endless loop - )?; - master_keys.insert(user, raw); } - - self_signing_keys.extend(response.self_signing_keys); - device_keys.extend(response.device_keys); - } - _ => { - back_off(server.to_owned()).await; - - failures.insert(server.to_string(), json!({})); } + let json = serde_json::to_value(master_key) + .expect("to_value always works"); + let raw = serde_json::from_value(json) + .expect("Raw::from_value always works"); + services().users.add_cross_signing_keys( + &user, &raw, None, None, + // Dont notify. 
A notification would trigger another key + // request resulting in an endless loop + false, + )?; + master_keys.insert(user, raw); } + + self_signing_keys.extend(response.self_signing_keys); + device_keys.extend(response.device_keys); } Ok(get_keys::v3::Response { + failures, + device_keys, master_keys, self_signing_keys, user_signing_keys, - device_keys, - failures, }) } +/// Returns `Err` if key requests to the server are being backed off due to +/// previous errors. +async fn check_key_requests_back_off(server: &ServerName) -> Result<()> { + if let Some((time, tries)) = + services().globals.bad_query_ratelimiter.read().await.get(server) + { + // Exponential backoff + let mut min_elapsed_duration = + Duration::from_secs(30) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if let Some(remaining) = + min_elapsed_duration.checked_sub(time.elapsed()) + { + debug!(%server, %tries, ?remaining, "Backing off from server"); + return Err(Error::BadServerResponse( + "bad query, still backing off", + )); + } + } + Ok(()) +} + +/// Backs off future remote device key requests to a server after a failure. +async fn back_off_key_requests(server: OwnedServerName) { + match services().globals.bad_query_ratelimiter.write().await.entry(server) { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + } + hash_map::Entry::Occupied(mut e) => { + *e.get_mut() = (Instant::now(), e.get().1 + 1); + } + } +} + +/// Stops backing off remote device key requests to a server after a success. +async fn reset_key_request_back_off(server: &ServerName) { + services().globals.bad_query_ratelimiter.write().await.remove(server); +} + +/// Requests device keys from a remote server, unless the server is in backoff. +/// +/// Updates backoff state depending on the result of the request. 
+async fn request_keys_from( + server: &ServerName, + keys: Vec<(&UserId, &Vec)>, +) -> Result { + check_key_requests_back_off(server).await?; + + let result = request_keys_from_inner(server, keys).await; + match &result { + Ok(_) => reset_key_request_back_off(server).await, + Err(error) => { + debug!(%server, %error, "remote device key query failed"); + back_off_key_requests(server.to_owned()).await; + } + } + result +} + +async fn request_keys_from_inner( + server: &ServerName, + keys: Vec<(&UserId, &Vec)>, +) -> Result { + let mut device_keys_input_fed = BTreeMap::new(); + for (user_id, keys) in keys { + device_keys_input_fed.insert(user_id.to_owned(), keys.clone()); + } + // TODO: switch .and_then(|result| result) to .flatten() when stable + // + tokio::time::timeout( + Duration::from_secs(25), + services().sending.send_federation_request( + server, + federation::keys::get_keys::v1::Request { + device_keys: device_keys_input_fed, + }, + ), + ) + .await + .map_err(|_e| Error::BadServerResponse("Query took too long")) + .and_then(|result| result) +} + fn add_unsigned_device_display_name( keys: &mut Raw, metadata: ruma::api::client::device::Device, ) -> serde_json::Result<()> { if let Some(display_name) = metadata.display_name { - let mut object = keys.deserialize_as::>()?; + let mut object = keys + .deserialize_as::>()?; let unsigned = object.entry("unsigned").or_insert_with(|| json!({})); if let serde_json::Value::Object(unsigned_object) = unsigned { - unsigned_object.insert("device_display_name".to_owned(), display_name.into()); + unsigned_object + .insert("device_display_name".to_owned(), display_name.into()); } *keys = Raw::from_json(serde_json::value::to_raw_value(&object)?); @@ -465,7 +556,10 @@ fn add_unsigned_device_display_name( } pub(crate) async fn claim_keys_helper( - one_time_keys_input: &BTreeMap>, + one_time_keys_input: &BTreeMap< + OwnedUserId, + BTreeMap, + >, ) -> Result { let mut one_time_keys = BTreeMap::new(); @@ -481,11 +575,11 @@ pub(crate) 
async fn claim_keys_helper( let mut container = BTreeMap::new(); for (device_id, key_algorithm) in map { - if let Some(one_time_keys) = - services() - .users - .take_one_time_key(user_id, device_id, key_algorithm)? - { + if let Some(one_time_keys) = services().users.take_one_time_key( + user_id, + device_id, + key_algorithm, + )? { let mut c = BTreeMap::new(); c.insert(one_time_keys.0, one_time_keys.1); container.insert(device_id.clone(), c); diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs index 7fc65c26..f7ffa413 100644 --- a/src/api/client_server/media.rs +++ b/src/api/client_server/media.rs @@ -1,25 +1,147 @@ use std::time::Duration; -use crate::{service::media::FileMeta, services, utils, Error, Result, Ruma}; -use ruma::api::client::{ - error::ErrorKind, - media::{ - create_content, get_content, get_content_as_filename, get_content_thumbnail, - get_media_config, +use axum::response::IntoResponse; +use http::{ + header::{CONTENT_DISPOSITION, CONTENT_SECURITY_POLICY, CONTENT_TYPE}, + HeaderName, HeaderValue, Method, +}; +use phf::{phf_set, Set}; +use ruma::{ + api::{ + client::{ + authenticated_media as authenticated_media_client, + error::ErrorKind, + media::{self as legacy_media, create_content}, + }, + federation::authenticated_media as authenticated_media_fed, }, + http_headers::{ContentDisposition, ContentDispositionType}, +}; +use tracing::{debug, error, info, warn}; + +use crate::{ + service::media::FileMeta, + services, + utils::{self, MxcData}, + Ar, Error, Ra, Result, }; const MXC_LENGTH: usize = 32; +/// `Content-Type`s that can be rendered inline in a browser without risking XSS +/// +/// Cargo-culted from Synapse. Note that SVG can contain inline JavaScript. +static INLINE_CONTENT_TYPES: Set<&str> = phf_set! 
{ + // Keep sorted + "application/json", + "application/ld+json", + "audio/aac", + "audio/flac", + "audio/mp4", + "audio/mpeg", + "audio/ogg", + "audio/wav", + "audio/wave", + "audio/webm", + "audio/x-flac", + "audio/x-pn-wav", + "audio/x-wav", + "image/apng", + "image/avif", + "image/gif", + "image/jpeg", + "image/png", + "image/webp", + "text/css", + "text/csv", + "text/plain", + "video/mp4", + "video/ogg", + "video/quicktime", + "video/webm", +}; + +/// Value for the `Content-Security-Policy` header +/// +/// Cargo-culted from Synapse. +fn content_security_policy() -> HeaderValue { + [ + "sandbox", + "default-src 'none'", + "script-src 'none'", + "plugin-types application/pdf", + "style-src 'unsafe-inline'", + "media-src 'self'", + "object-src 'self'", + ] + .join("; ") + .try_into() + .expect("hardcoded header value should be valid") +} + +/// Determine a `Content-Disposition` header that prevents XSS +// TODO: In some of the places this function is called, we could parse the +// desired filename out of an existing `Content-Disposition` header value, such +// as what we're storing in the database or what we receive over federation. +// Doing this correctly is tricky, so I'm skipping it for now. +fn content_disposition_for( + content_type: Option<&str>, + filename: Option, +) -> ContentDisposition { + let disposition_type = match content_type { + Some(x) if INLINE_CONTENT_TYPES.contains(x) => { + ContentDispositionType::Inline + } + _ => ContentDispositionType::Attachment, + }; + ContentDisposition { + disposition_type, + filename, + } +} + +/// Set a header, but panic if it was already set +/// +/// # Panics +/// +/// Panics if the header was already set. 
+fn set_header_or_panic( + response: &mut axum::response::Response, + header_name: HeaderName, + header_value: HeaderValue, +) { + if let Some(header_value) = response.headers().get(&header_name) { + error!(?header_name, ?header_value, "unexpected pre-existing header"); + panic!( + "expected {header_name:?} to be unset but it was set to \ + {header_value:?}" + ); + } + + response.headers_mut().insert(header_name, header_value); +} + /// # `GET /_matrix/media/r0/config` /// /// Returns max upload size. -pub async fn get_media_config_route( - _body: Ruma, -) -> Result { - Ok(get_media_config::v3::Response { - upload_size: services().globals.max_request_size().into(), - }) +#[allow(deprecated)] // unauthenticated media +pub(crate) async fn get_media_config_legacy_route( + _body: Ar, +) -> Result> { + Ok(Ra(legacy_media::get_media_config::v3::Response { + upload_size: services().globals.max_request_size(), + })) +} + +/// # `GET /_matrix/client/v1/media/config` +/// +/// Returns max upload size. 
+pub(crate) async fn get_media_config_route( + _body: Ar, +) -> Result> { + Ok(Ra(authenticated_media_client::get_media_config::v1::Response { + upload_size: services().globals.max_request_size(), + })) } /// # `POST /_matrix/media/r0/upload` @@ -28,64 +150,214 @@ pub async fn get_media_config_route( /// /// - Some metadata will be saved in the database /// - Media will be saved in the media/ directory -pub async fn create_content_route( - body: Ruma, -) -> Result { - let mxc = format!( - "mxc://{}/{}", - services().globals.server_name(), - utils::random_string(MXC_LENGTH) - ); +pub(crate) async fn create_content_route( + body: Ar, +) -> Result> { + let media_id = utils::random_string(MXC_LENGTH); + let mxc = MxcData::new(services().globals.server_name(), &media_id)?; services() .media .create( - mxc.clone(), + mxc.clone().into(), body.filename - .as_ref() - .map(|filename| "inline; filename=".to_owned() + filename) - .as_deref(), - body.content_type.as_deref(), + .clone() + .map(|filename| ContentDisposition { + disposition_type: ContentDispositionType::Inline, + filename: Some(filename), + }) + .as_ref(), + body.content_type.clone(), &body.file, ) .await?; - Ok(create_content::v3::Response { + Ok(Ra(create_content::v3::Response { content_uri: mxc.into(), blurhash: None, + })) +} + +/// Whether or not to allow remote content to be loaded +#[derive(Clone, Copy, PartialEq, Eq)] +enum AllowRemote { + Yes, + No, +} + +impl From for AllowRemote { + fn from(allow: bool) -> Self { + if allow { + Self::Yes + } else { + Self::No + } + } +} + +struct RemoteResponse { + #[allow(unused)] + metadata: authenticated_media_fed::ContentMetadata, + content: authenticated_media_fed::Content, +} + +/// Fetches remote media content from a URL specified in a +/// `/_matrix/federation/v1/media/*/{mediaId}` `Location` header +#[tracing::instrument] +async fn get_redirected_content( + location: String, +) -> Result { + let location = location.parse().map_err(|error| { + warn!(location, 
%error, "Invalid redirect location"); + Error::BadServerResponse("Invalid redirect location") + })?; + let response = services() + .globals + .federation_client() + .execute(reqwest::Request::new(Method::GET, location)) + .await?; + + let content_type = response + .headers() + .get(CONTENT_TYPE) + .map(|value| { + value.to_str().map_err(|error| { + error!( + ?value, + %error, + "Invalid Content-Type header" + ); + Error::BadServerResponse("Invalid Content-Type header") + }) + }) + .transpose()? + .map(str::to_owned); + + let content_disposition = response + .headers() + .get(CONTENT_DISPOSITION) + .map(|value| { + ContentDisposition::try_from(value.as_bytes()).map_err(|error| { + error!( + ?value, + %error, + "Invalid Content-Disposition header" + ); + Error::BadServerResponse("Invalid Content-Disposition header") + }) + }) + .transpose()?; + + Ok(authenticated_media_fed::Content { + file: response.bytes().await?.to_vec(), + content_type, + content_disposition, }) } -pub async fn get_remote_content( - mxc: &str, - server_name: &ruma::ServerName, - media_id: String, -) -> Result { +#[tracing::instrument(skip_all)] +async fn get_remote_content_via_federation_api( + mxc: &MxcData<'_>, +) -> Result { + let authenticated_media_fed::get_content::v1::Response { + metadata, + content, + } = services() + .sending + .send_federation_request( + mxc.server_name, + authenticated_media_fed::get_content::v1::Request { + media_id: mxc.media_id.to_owned(), + timeout_ms: Duration::from_secs(20), + }, + ) + .await?; + + let content = match content { + authenticated_media_fed::FileOrLocation::File(content) => { + debug!("Got media from remote server"); + content + } + authenticated_media_fed::FileOrLocation::Location(location) => { + debug!(location, "Following redirect"); + get_redirected_content(location).await? 
+ } + }; + + Ok(RemoteResponse { + metadata, + content, + }) +} + +#[allow(deprecated)] // unauthenticated media +#[tracing::instrument(skip_all)] +async fn get_remote_content_via_legacy_api( + mxc: &MxcData<'_>, +) -> Result { let content_response = services() .sending .send_federation_request( - server_name, - get_content::v3::Request { + mxc.server_name, + legacy_media::get_content::v3::Request { allow_remote: false, - server_name: server_name.to_owned(), - media_id, + server_name: mxc.server_name.to_owned(), + media_id: mxc.media_id.to_owned(), timeout_ms: Duration::from_secs(20), allow_redirect: false, }, ) .await?; + Ok(RemoteResponse { + metadata: authenticated_media_fed::ContentMetadata {}, + content: authenticated_media_fed::Content { + file: content_response.file, + content_disposition: content_response.content_disposition, + content_type: content_response.content_type, + }, + }) +} + +#[tracing::instrument] +async fn get_remote_content( + mxc: &MxcData<'_>, +) -> Result { + let fed_result = get_remote_content_via_federation_api(mxc).await; + + let response = match fed_result { + Ok(response) => { + debug!("Got remote content via authenticated media API"); + response + } + Err(Error::Federation(_, error)) + if error.error_kind() == Some(&ErrorKind::Unrecognized) + // https://github.com/t2bot/matrix-media-repo/issues/609 + || error.error_kind() == Some(&ErrorKind::Unauthorized) => + { + info!( + "Remote server does not support authenticated media, falling \ + back to deprecated API" + ); + + get_remote_content_via_legacy_api(mxc).await? 
+ } + Err(e) => { + return Err(e); + } + }; + services() .media .create( - mxc.to_owned(), - content_response.content_disposition.as_deref(), - content_response.content_type.as_deref(), - &content_response.file, + mxc.clone().into(), + response.content.content_disposition.as_ref(), + response.content.content_type.clone(), + &response.content.file, ) .await?; - Ok(content_response) + Ok(response) } /// # `GET /_matrix/media/r0/download/{serverName}/{mediaId}` @@ -93,29 +365,120 @@ pub async fn get_remote_content( /// Load media from our server or over federation. /// /// - Only allows federation if `allow_remote` is true -pub async fn get_content_route( - body: Ruma, -) -> Result { - let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); +#[allow(deprecated)] // unauthenticated media +pub(crate) async fn get_content_legacy_route( + body: Ar, +) -> Result { + use authenticated_media_client::get_content::v1::{ + Request as AmRequest, Response as AmResponse, + }; + use legacy_media::get_content::v3::{ + Request as LegacyRequest, Response as LegacyResponse, + }; - if let Some(FileMeta { - content_disposition, - content_type, - file, - }) = services().media.get(mxc.clone()).await? - { - Ok(get_content::v3::Response { + fn convert_request( + LegacyRequest { + server_name, + media_id, + timeout_ms, + .. 
+ }: LegacyRequest, + ) -> AmRequest { + AmRequest { + server_name, + media_id, + timeout_ms, + } + } + + fn convert_response( + AmResponse { + file, + content_type, + content_disposition, + }: AmResponse, + ) -> LegacyResponse { + LegacyResponse { file, content_type, content_disposition, cross_origin_resource_policy: Some("cross-origin".to_owned()), + } + } + + let allow_remote = body.allow_remote.into(); + + get_content_route_ruma(body.map_body(convert_request), allow_remote) + .await + .map(|response| { + let response = convert_response(response); + let mut r = Ra(response).into_response(); + + set_header_or_panic( + &mut r, + CONTENT_SECURITY_POLICY, + content_security_policy(), + ); + + r + }) +} + +/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}` +/// +/// Load media from our server or over federation. +pub(crate) async fn get_content_route( + body: Ar, +) -> Result { + get_content_route_ruma(body, AllowRemote::Yes).await.map(|x| { + let mut r = Ra(x).into_response(); + + set_header_or_panic( + &mut r, + CONTENT_SECURITY_POLICY, + content_security_policy(), + ); + + r + }) +} + +async fn get_content_route_ruma( + body: Ar, + allow_remote: AllowRemote, +) -> Result { + let mxc = MxcData::new(&body.server_name, &body.media_id)?; + + if let Some(( + FileMeta { + content_type, + .. + }, + file, + )) = services().media.get(mxc.clone().into()).await? 
+ { + Ok(authenticated_media_client::get_content::v1::Response { + file, + content_disposition: Some(content_disposition_for( + content_type.as_deref(), + None, + )), + content_type, + }) + } else if &*body.server_name != services().globals.server_name() + && allow_remote == AllowRemote::Yes + { + let remote_response = get_remote_content(&mxc).await?; + Ok(authenticated_media_client::get_content::v1::Response { + file: remote_response.content.file, + content_disposition: Some(content_disposition_for( + remote_response.content.content_type.as_deref(), + None, + )), + content_type: remote_response.content.content_type, }) - } else if &*body.server_name != services().globals.server_name() && body.allow_remote { - let remote_content_response = - get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?; - Ok(remote_content_response) } else { - Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) + Err(Error::BadRequest(ErrorKind::NotYetUploaded, "Media not found.")) } } @@ -124,100 +487,433 @@ pub async fn get_content_route( /// Load media from our server or over federation, permitting desired filename. /// /// - Only allows federation if `allow_remote` is true -pub async fn get_content_as_filename_route( - body: Ruma, -) -> Result { - let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); +#[allow(deprecated)] // unauthenticated media +pub(crate) async fn get_content_as_filename_legacy_route( + body: Ar, +) -> Result { + use authenticated_media_client::get_content_as_filename::v1::{ + Request as AmRequest, Response as AmResponse, + }; + use legacy_media::get_content_as_filename::v3::{ + Request as LegacyRequest, Response as LegacyResponse, + }; - if let Some(FileMeta { - content_disposition: _, - content_type, - file, - }) = services().media.get(mxc.clone()).await? - { - Ok(get_content_as_filename::v3::Response { + fn convert_request( + LegacyRequest { + server_name, + media_id, + filename, + timeout_ms, + .. 
+ }: LegacyRequest, + ) -> AmRequest { + AmRequest { + server_name, + media_id, + filename, + timeout_ms, + } + } + + fn convert_response( + AmResponse { file, content_type, - content_disposition: Some(format!("inline; filename={}", body.filename)), + content_disposition, + }: AmResponse, + ) -> LegacyResponse { + LegacyResponse { + file, + content_type, + content_disposition, cross_origin_resource_policy: Some("cross-origin".to_owned()), - }) - } else if &*body.server_name != services().globals.server_name() && body.allow_remote { - let remote_content_response = - get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?; + } + } - Ok(get_content_as_filename::v3::Response { - content_disposition: Some(format!("inline: filename={}", body.filename)), - content_type: remote_content_response.content_type, - file: remote_content_response.file, - cross_origin_resource_policy: Some("cross-origin".to_owned()), + let allow_remote = body.allow_remote.into(); + get_content_as_filename_route_ruma( + body.map_body(convert_request), + allow_remote, + ) + .await + .map(|response| { + let response = convert_response(response); + let mut r = Ra(response).into_response(); + + set_header_or_panic( + &mut r, + CONTENT_SECURITY_POLICY, + content_security_policy(), + ); + + r + }) +} + +/// # `GET /_matrix/client/v1/media/download/{serverName}/{mediaId}/{fileName}` +/// +/// Load media from our server or over federation, permitting desired filename. +pub(crate) async fn get_content_as_filename_route( + body: Ar, +) -> Result { + get_content_as_filename_route_ruma(body, AllowRemote::Yes).await.map(|x| { + let mut r = Ra(x).into_response(); + + set_header_or_panic( + &mut r, + CONTENT_SECURITY_POLICY, + content_security_policy(), + ); + + r + }) +} + +async fn get_content_as_filename_route_ruma( + body: Ar, + allow_remote: AllowRemote, +) -> Result { + let mxc = MxcData::new(&body.server_name, &body.media_id)?; + + if let Some(( + FileMeta { + content_type, + .. 
+ }, + file, + )) = services().media.get(mxc.clone().into()).await? + { + Ok(authenticated_media_client::get_content_as_filename::v1::Response { + file, + content_disposition: Some(content_disposition_for( + content_type.as_deref(), + Some(body.filename.clone()), + )), + content_type, + }) + } else if &*body.server_name != services().globals.server_name() + && allow_remote == AllowRemote::Yes + { + let remote_response = get_remote_content(&mxc).await?; + + Ok(authenticated_media_client::get_content_as_filename::v1::Response { + content_disposition: Some(content_disposition_for( + remote_response.content.content_type.as_deref(), + Some(body.filename.clone()), + )), + content_type: remote_response.content.content_type, + file: remote_response.content.file, }) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) } } +fn fix_thumbnail_headers(r: &mut axum::response::Response) { + let content_type = r + .headers() + .get(CONTENT_TYPE) + .and_then(|x| std::str::from_utf8(x.as_ref()).ok()) + .map(ToOwned::to_owned); + + set_header_or_panic(r, CONTENT_SECURITY_POLICY, content_security_policy()); + set_header_or_panic( + r, + CONTENT_DISPOSITION, + content_disposition_for(content_type.as_deref(), None) + .to_string() + .try_into() + .expect("generated header value should be valid"), + ); +} + /// # `GET /_matrix/media/r0/thumbnail/{serverName}/{mediaId}` /// /// Load media thumbnail from our server or over federation. 
/// /// - Only allows federation if `allow_remote` is true -pub async fn get_content_thumbnail_route( - body: Ruma, -) -> Result { - let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); +#[allow(deprecated)] // unauthenticated media +pub(crate) async fn get_content_thumbnail_legacy_route( + body: Ar, +) -> Result { + use authenticated_media_client::get_content_thumbnail::v1::{ + Request as AmRequest, Response as AmResponse, + }; + use legacy_media::get_content_thumbnail::v3::{ + Request as LegacyRequest, Response as LegacyResponse, + }; - if let Some(FileMeta { - content_type, file, .. - }) = services() - .media - .get_thumbnail( - mxc.clone(), - body.width - .try_into() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, - body.height - .try_into() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, - ) - .await? - { - Ok(get_content_thumbnail::v3::Response { + fn convert_request( + LegacyRequest { + server_name, + media_id, + method, + width, + height, + timeout_ms, + animated, + .. + }: LegacyRequest, + ) -> AmRequest { + AmRequest { + server_name, + media_id, + method, + width, + height, + timeout_ms, + animated, + } + } + + fn convert_response( + AmResponse { file, content_type, + .. 
+ }: AmResponse, + ) -> LegacyResponse { + LegacyResponse { + file, + content_type, + content_disposition: None, cross_origin_resource_policy: Some("cross-origin".to_owned()), - }) - } else if &*body.server_name != services().globals.server_name() && body.allow_remote { - let get_thumbnail_response = services() - .sending - .send_federation_request( - &body.server_name, - get_content_thumbnail::v3::Request { - allow_remote: false, - height: body.height, - width: body.width, - method: body.method.clone(), - server_name: body.server_name.clone(), - media_id: body.media_id.clone(), - timeout_ms: Duration::from_secs(20), - allow_redirect: false, - }, - ) - .await?; - - services() - .media - .upload_thumbnail( - mxc, - None, - get_thumbnail_response.content_type.as_deref(), - body.width.try_into().expect("all UInts are valid u32s"), - body.height.try_into().expect("all UInts are valid u32s"), - &get_thumbnail_response.file, - ) - .await?; - - Ok(get_thumbnail_response) - } else { - Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) + } } + + let allow_remote = body.allow_remote.into(); + + get_content_thumbnail_route_ruma( + body.map_body(convert_request), + allow_remote, + ) + .await + .map(|response| { + let response = convert_response(response); + let mut r = Ra(response).into_response(); + + fix_thumbnail_headers(&mut r); + + r + }) +} + +/// # `GET /_matrix/client/v1/media/thumbnail/{serverName}/{mediaId}` +/// +/// Load media thumbnail from our server or over federation. 
+pub(crate) async fn get_content_thumbnail_route( + body: Ar, +) -> Result { + get_content_thumbnail_route_ruma(body, AllowRemote::Yes).await.map(|x| { + let mut r = Ra(x).into_response(); + + fix_thumbnail_headers(&mut r); + + r + }) +} + +#[tracing::instrument(skip_all)] +async fn get_remote_thumbnail_via_federation_api( + server_name: &ruma::ServerName, + request: authenticated_media_fed::get_content_thumbnail::v1::Request, +) -> Result { + let authenticated_media_fed::get_content_thumbnail::v1::Response { + metadata, + content, + } = services() + .sending + .send_federation_request(server_name, request) + .await?; + + let content = match content { + authenticated_media_fed::FileOrLocation::File(content) => { + debug!("Got thumbnail from remote server"); + content + } + authenticated_media_fed::FileOrLocation::Location(location) => { + debug!(location, "Following redirect"); + get_redirected_content(location).await? + } + }; + + Ok(RemoteResponse { + metadata, + content, + }) +} + +#[allow(deprecated)] // unauthenticated media +#[tracing::instrument(skip_all)] +async fn get_remote_thumbnail_via_legacy_api( + server_name: &ruma::ServerName, + authenticated_media_fed::get_content_thumbnail::v1::Request { + media_id, + method, + width, + height, + timeout_ms, + animated, + }: authenticated_media_fed::get_content_thumbnail::v1::Request, +) -> Result { + let content_response = services() + .sending + .send_federation_request( + server_name, + legacy_media::get_content_thumbnail::v3::Request { + server_name: server_name.to_owned(), + allow_remote: false, + allow_redirect: false, + media_id, + method, + width, + height, + timeout_ms, + animated, + }, + ) + .await?; + + Ok(RemoteResponse { + metadata: authenticated_media_fed::ContentMetadata {}, + content: authenticated_media_fed::Content { + file: content_response.file, + content_disposition: None, + content_type: content_response.content_type, + }, + }) +} + +#[tracing::instrument] +async fn get_remote_thumbnail( + 
server_name: &ruma::ServerName, + request: authenticated_media_fed::get_content_thumbnail::v1::Request, +) -> Result { + let fed_result = + get_remote_thumbnail_via_federation_api(server_name, request.clone()) + .await; + + let response = match fed_result { + Ok(response) => { + debug!("Got remote content via authenticated media API"); + response + } + Err(Error::Federation(_, error)) + if error.error_kind() == Some(&ErrorKind::Unrecognized) + // https://github.com/t2bot/matrix-media-repo/issues/609 + || error.error_kind() == Some(&ErrorKind::Unauthorized) => + { + info!( + "Remote server does not support authenticated media, falling \ + back to deprecated API" + ); + + get_remote_thumbnail_via_legacy_api(server_name, request.clone()) + .await? + } + Err(e) => { + return Err(e); + } + }; + + Ok(response) +} + +async fn get_content_thumbnail_route_ruma( + body: Ar, + allow_remote: AllowRemote, +) -> Result { + let mxc = MxcData::new(&body.server_name, &body.media_id)?; + let width = body.width.try_into().map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid.") + })?; + let height = body.height.try_into().map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Height is invalid.") + })?; + + let make_response = |file, content_type| { + authenticated_media_client::get_content_thumbnail::v1::Response { + file, + content_type, + content_disposition: None, + } + }; + + if let Some(( + FileMeta { + content_type, + .. + }, + file, + )) = services() + .media + .get_thumbnail(mxc.clone().into(), width, height) + .await? 
+ { + return Ok(make_response(file, content_type)); + } + + if &*body.server_name != services().globals.server_name() + && allow_remote == AllowRemote::Yes + { + let get_thumbnail_response = get_remote_thumbnail( + &body.server_name, + authenticated_media_fed::get_content_thumbnail::v1::Request { + height: body.height, + width: body.width, + method: body.method.clone(), + media_id: body.media_id.clone(), + timeout_ms: Duration::from_secs(20), + // we don't support animated thumbnails, so don't try requesting + // one - we're allowed to ignore the client's request for an + // animated thumbnail + animated: Some(false), + }, + ) + .await; + + match get_thumbnail_response { + Ok(resp) => { + services() + .media + .upload_thumbnail( + mxc.clone().into(), + None, + resp.content.content_type.clone(), + width, + height, + &resp.content.file, + ) + .await?; + + return Ok(make_response( + resp.content.file, + resp.content.content_type, + )); + } + Err(error) => warn!( + %error, + "Failed to fetch thumbnail via federation, trying to fetch \ + original media and create thumbnail ourselves" + ), + } + + get_remote_content(&mxc).await?; + + if let Some(( + FileMeta { + content_type, + .. + }, + file, + )) = + services().media.get_thumbnail(mxc.into(), width, height).await? 
+ { + return Ok(make_response(file, content_type)); + } + + error!("Source media doesn't exist even after fetching it from remote"); + } + + Err(Error::BadRequest(ErrorKind::NotYetUploaded, "Media not found.")) } diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 6fe1e0ea..e0afec6b 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -1,11 +1,18 @@ +use std::{ + collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, + sync::Arc, + time::{Duration, Instant}, +}; + use ruma::{ api::{ client::{ error::ErrorKind, membership::{ - ban_user, forget_room, get_member_events, invite_user, join_room_by_id, - join_room_by_id_or_alias, joined_members, joined_rooms, kick_user, leave_room, - unban_user, ThirdPartySigned, + ban_user, forget_room, get_member_events, invite_user, + join_room_by_id, join_room_by_id_or_alias, joined_members, + joined_rooms, kick_user, leave_room, unban_user, + ThirdPartySigned, }, }, federation::{self, membership::create_invite}, @@ -18,38 +25,40 @@ use ruma::{ }, StateEventType, TimelineEventType, }, - serde::Base64, - state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, - OwnedServerName, OwnedUserId, RoomId, RoomVersionId, UserId, + room_version_rules::RoomVersionRules, + state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, + MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, OwnedServerName, + OwnedUserId, RoomId, UserId, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; -use std::{ - collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, - sync::Arc, - time::{Duration, Instant}, -}; use tokio::sync::RwLock; use tracing::{debug, error, info, warn}; -use crate::{ - service::pdu::{gen_event_id_canonical_json, PduBuilder}, - services, utils, Error, PduEvent, Result, Ruma, -}; - use super::get_alias_helper; +use crate::{ + service::{ + globals::SigningKeys, + pdu::{gen_event_id_canonical_json, 
PduBuilder}, + rooms::state::ExtractVersion, + }, + services, utils, Ar, Error, PduEvent, Ra, Result, +}; /// # `POST /_matrix/client/r0/rooms/{roomId}/join` /// /// Tries to join the sender user into a room. /// -/// - If the server knowns about this room: creates the join event and does auth rules locally -/// - If the server does not know about the room: asks other servers over federation -pub async fn join_room_by_id_route( - body: Ruma, -) -> Result { +/// - If the server knowns about this room: creates the join event and does auth +/// rules locally +/// - If the server does not know about the room: asks other servers over +/// federation +pub(crate) async fn join_room_by_id_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut servers = Vec::new(); // There is no body.server_name for /roomId/join + // There is no body.server_name for /roomId/join + let mut servers = Vec::new(); servers.extend( services() .rooms @@ -59,7 +68,7 @@ pub async fn join_room_by_id_route( .iter() .filter_map(|event| serde_json::from_str(event.json().get()).ok()) .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) + .filter_map(|sender| sender.as_str().map(ToOwned::to_owned)) .filter_map(|sender| UserId::parse(sender).ok()) .map(|user| user.server_name().to_owned()), ); @@ -79,23 +88,28 @@ pub async fn join_room_by_id_route( body.third_party_signed.as_ref(), ) .await + .map(Ra) } /// # `POST /_matrix/client/r0/join/{roomIdOrAlias}` /// /// Tries to join the sender user into a room. 
/// -/// - If the server knowns about this room: creates the join event and does auth rules locally -/// - If the server does not know about the room: asks other servers over federation -pub async fn join_room_by_id_or_alias_route( - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_deref().expect("user is authenticated"); +/// - If the server knowns about this room: creates the join event and does auth +/// rules locally +/// - If the server does not know about the room: asks other servers over +/// federation +pub(crate) async fn join_room_by_id_or_alias_route( + body: Ar, +) -> Result> { + let sender_user = + body.sender_user.as_deref().expect("user is authenticated"); let body = body.body; - let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) { + let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) + { Ok(room_id) => { - let mut servers = body.server_name.clone(); + let mut servers = body.via.clone(); servers.extend( services() .rooms @@ -103,9 +117,13 @@ pub async fn join_room_by_id_or_alias_route( .invite_state(sender_user, &room_id)? 
.unwrap_or_default() .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) + .filter_map(|event| { + serde_json::from_str(event.json().get()).ok() + }) + .filter_map(|event: serde_json::Value| { + event.get("sender").cloned() + }) + .filter_map(|sender| sender.as_str().map(ToOwned::to_owned)) .filter_map(|sender| UserId::parse(sender).ok()) .map(|user| user.server_name().to_owned()), ); @@ -135,9 +153,9 @@ pub async fn join_room_by_id_or_alias_route( ) .await?; - Ok(join_room_by_id_or_alias::v3::Response { + Ok(Ra(join_room_by_id_or_alias::v3::Response { room_id: join_room_response.room_id, - }) + })) } /// # `POST /_matrix/client/r0/rooms/{roomId}/leave` @@ -145,25 +163,28 @@ pub async fn join_room_by_id_or_alias_route( /// Tries to leave the sender user from a room. /// /// - This should always work if the user is currently joined. -pub async fn leave_room_route( - body: Ruma, -) -> Result { +pub(crate) async fn leave_room_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); leave_room(sender_user, &body.room_id, body.reason.clone()).await?; - Ok(leave_room::v3::Response::new()) + Ok(Ra(leave_room::v3::Response::new())) } /// # `POST /_matrix/client/r0/rooms/{roomId}/invite` /// /// Tries to send an invite event into the room. 
-pub async fn invite_user_route( - body: Ruma, -) -> Result { +pub(crate) async fn invite_user_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if let invite_user::v3::InvitationRecipient::UserId { user_id } = &body.recipient { + if let invite_user::v3::InvitationRecipient::UserId { + user_id, + } = &body.recipient + { invite_helper( sender_user, user_id, @@ -172,7 +193,7 @@ pub async fn invite_user_route( false, ) .await?; - Ok(invite_user::v3::Response {}) + Ok(Ra(invite_user::v3::Response {})) } else { Err(Error::BadRequest(ErrorKind::NotFound, "User not found.")) } @@ -181,19 +202,11 @@ pub async fn invite_user_route( /// # `POST /_matrix/client/r0/rooms/{roomId}/kick` /// /// Tries to send a kick event into the room. -pub async fn kick_user_route( - body: Ruma, -) -> Result { +pub(crate) async fn kick_user_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if let Ok(true) = services() - .rooms - .state_cache - .is_left(sender_user, &body.room_id) - { - return Ok(kick_user::v3::Response {}); - } - let mut event: RoomMemberEventContent = serde_json::from_str( services() .rooms @@ -213,18 +226,13 @@ pub async fn kick_user_route( .map_err(|_| Error::bad_database("Invalid member event in database."))?; event.membership = MembershipState::Leave; - event.reason = body.reason.clone(); + event.reason.clone_from(&body.reason); - let mutex_state = Arc::clone( - services() - .globals - .roomid_mutex_state - .write() - .await - .entry(body.room_id.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; + let room_token = services() + .globals + .roomid_mutex_state + .lock_key(body.room_id.clone()) + .await; services() .rooms @@ -232,38 +240,30 @@ pub async fn kick_user_route( .build_and_append_pdu( PduBuilder { event_type: TimelineEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created 
it"), + content: to_raw_value(&event) + .expect("event is valid, we just created it"), unsigned: None, state_key: Some(body.user_id.to_string()), redacts: None, }, sender_user, - &body.room_id, - &state_lock, + &room_token, ) .await?; - drop(state_lock); + drop(room_token); - Ok(kick_user::v3::Response::new()) + Ok(Ra(kick_user::v3::Response::new())) } /// # `POST /_matrix/client/r0/rooms/{roomId}/ban` /// /// Tries to send a ban event into the room. -pub async fn ban_user_route(body: Ruma) -> Result { +pub(crate) async fn ban_user_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if let Ok(Some(membership_event)) = services() - .rooms - .state_accessor - .get_member(&body.room_id, sender_user) - { - if membership_event.membership == MembershipState::Ban { - return Ok(ban_user::v3::Response {}); - } - } - let event = services() .rooms .state_accessor @@ -285,25 +285,25 @@ pub async fn ban_user_route(body: Ruma) -> Result) -> Result, -) -> Result { +pub(crate) async fn unban_user_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if let Ok(Some(membership_event)) = services() - .rooms - .state_accessor - .get_member(&body.room_id, sender_user) - { - if membership_event.membership != MembershipState::Ban { - return Ok(unban_user::v3::Response {}); - } - } - let mut event: RoomMemberEventContent = serde_json::from_str( services() .rooms @@ -364,18 +354,13 @@ pub async fn unban_user_route( .map_err(|_| Error::bad_database("Invalid member event in database."))?; event.membership = MembershipState::Leave; - event.reason = body.reason.clone(); + event.reason.clone_from(&body.reason); - let mutex_state = Arc::clone( - services() - .globals - .roomid_mutex_state - .write() - .await - .entry(body.room_id.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; + let room_token = services() + .globals + .roomid_mutex_state + 
.lock_key(body.room_id.clone()) + .await; services() .rooms @@ -383,69 +368,68 @@ pub async fn unban_user_route( .build_and_append_pdu( PduBuilder { event_type: TimelineEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), + content: to_raw_value(&event) + .expect("event is valid, we just created it"), unsigned: None, state_key: Some(body.user_id.to_string()), redacts: None, }, sender_user, - &body.room_id, - &state_lock, + &room_token, ) .await?; - drop(state_lock); + drop(room_token); - Ok(unban_user::v3::Response::new()) + Ok(Ra(unban_user::v3::Response::new())) } /// # `POST /_matrix/client/r0/rooms/{roomId}/forget` /// /// Forgets about a room. /// -/// - If the sender user currently left the room: Stops sender user from receiving information about the room +/// - If the sender user currently left the room: Stops sender user from +/// receiving information about the room /// -/// Note: Other devices of the user have no way of knowing the room was forgotten, so this has to -/// be called from every device -pub async fn forget_room_route( - body: Ruma, -) -> Result { +/// Note: Other devices of the user have no way of knowing the room was +/// forgotten, so this has to be called from every device +pub(crate) async fn forget_room_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services() - .rooms - .state_cache - .forget(&body.room_id, sender_user)?; + services().rooms.state_cache.forget(&body.room_id, sender_user)?; - Ok(forget_room::v3::Response::new()) + Ok(Ra(forget_room::v3::Response::new())) } /// # `POST /_matrix/client/r0/joined_rooms` /// /// Lists all rooms the user has joined. 
-pub async fn joined_rooms_route( - body: Ruma, -) -> Result { +pub(crate) async fn joined_rooms_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - Ok(joined_rooms::v3::Response { + Ok(Ra(joined_rooms::v3::Response { joined_rooms: services() .rooms .state_cache .rooms_joined(sender_user) - .filter_map(|r| r.ok()) + .filter_map(Result::ok) .collect(), - }) + })) } /// # `POST /_matrix/client/r0/rooms/{roomId}/members` /// -/// Lists all joined users in a room (TODO: at a specific point in time, with a specific membership). +/// Lists all joined users in a room (TODO: at a specific point in time, with a +/// specific membership). /// /// - Only works if the user is currently joined -pub async fn get_member_events_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_member_events_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if !services() @@ -454,12 +438,12 @@ pub async fn get_member_events_route( .user_can_see_state_events(sender_user, &body.room_id)? 
{ return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "You don't have permission to view this room.", )); } - Ok(get_member_events::v3::Response { + Ok(Ra(get_member_events::v3::Response { chunk: services() .rooms .state_accessor @@ -469,7 +453,7 @@ pub async fn get_member_events_route( .filter(|(key, _)| key.0 == StateEventType::RoomMember) .map(|(_, pdu)| pdu.to_member_event()) .collect(), - }) + })) } /// # `POST /_matrix/client/r0/rooms/{roomId}/joined_members` @@ -478,9 +462,9 @@ pub async fn get_member_events_route( /// /// - The sender user must be in the room /// - TODO: An appservice just needs a puppet joined -pub async fn joined_members_route( - body: Ruma, -) -> Result { +pub(crate) async fn joined_members_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if !services() @@ -489,7 +473,7 @@ pub async fn joined_members_route( .user_can_see_state_events(sender_user, &body.room_id)? { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "You don't have permission to view this room.", )); } @@ -499,7 +483,7 @@ pub async fn joined_members_route( .rooms .state_cache .room_members(&body.room_id) - .filter_map(|r| r.ok()) + .filter_map(Result::ok) { let display_name = services().users.displayname(&user_id)?; let avatar_url = services().users.avatar_url(&user_id)?; @@ -513,9 +497,13 @@ pub async fn joined_members_route( ); } - Ok(joined_members::v3::Response { joined }) + Ok(Ra(joined_members::v3::Response { + joined, + })) } +#[allow(clippy::too_many_lines)] +#[tracing::instrument(skip(reason, _third_party_signed))] async fn join_room_by_id_helper( sender_user: Option<&UserId>, room_id: &RoomId, @@ -525,66 +513,180 @@ async fn join_room_by_id_helper( ) -> Result { let sender_user = sender_user.expect("user is authenticated"); - if let Ok(true) = services().rooms.state_cache.is_joined(sender_user, room_id) { - return 
Ok(join_room_by_id::v3::Response { - room_id: room_id.into(), - }); - } - - let mutex_state = Arc::clone( - services() - .globals - .roomid_mutex_state - .write() - .await - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; + let room_token = services() + .globals + .roomid_mutex_state + .lock_key(room_id.to_owned()) + .await; // Ask a remote server if we are not participating in this room - if !services() + if services() .rooms .state_cache .server_in_room(services().globals.server_name(), room_id)? { - info!("Joining {room_id} over federation."); + info!("We can join locally"); - let (make_join_response, remote_server) = - make_join_request(sender_user, room_id, servers).await?; + let join_rules_event = services().rooms.state_accessor.room_state_get( + room_id, + &StateEventType::RoomJoinRules, + "", + )?; - info!("make_join finished"); + let join_rules_event_content: Option = + join_rules_event + .as_ref() + .map(|join_rules_event| { + serde_json::from_str(join_rules_event.content.get()) + .map_err(|error| { + warn!(%error, "Invalid join rules event"); + Error::bad_database( + "Invalid join rules event in db.", + ) + }) + }) + .transpose()?; + + let restriction_rooms = match join_rules_event_content { + Some(RoomJoinRulesEventContent { + join_rule: + JoinRule::Restricted(restricted) + | JoinRule::KnockRestricted(restricted), + }) => restricted + .allow + .into_iter() + .filter_map(|a| match a { + AllowRule::RoomMembership(r) => Some(r.room_id), + _ => None, + }) + .collect(), + _ => Vec::new(), + }; + + let authorized_user = + if restriction_rooms.iter().any(|restriction_room_id| { + services() + .rooms + .state_cache + .is_joined(sender_user, restriction_room_id) + .unwrap_or(false) + }) { + let mut auth_user = None; + for user in services() + .rooms + .state_cache + .room_members(room_id) + .filter_map(Result::ok) + .collect::>() + { + if user.server_name() == services().globals.server_name() + && 
services().rooms.state_accessor.user_can_invite( + &room_token, + &user, + sender_user, + ) + { + auth_user = Some(user); + break; + } + } + auth_user + } else { + None + }; + + let event = RoomMemberEventContent { + membership: MembershipState::Join, + displayname: services().users.displayname(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, + is_direct: None, + third_party_invite: None, + blurhash: services().users.blurhash(sender_user)?, + reason: reason.clone(), + join_authorized_via_users_server: authorized_user, + }; + + // Try normal join first + let error = match services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&event) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_user.to_string()), + redacts: None, + }, + sender_user, + &room_token, + ) + .await + { + Ok(_event_id) => { + return Ok(join_room_by_id::v3::Response::new( + room_id.to_owned(), + )) + } + Err(e) => e, + }; + + if restriction_rooms.is_empty() + && servers.iter().any(|s| *s != services().globals.server_name()) + { + return Err(error); + } + + info!( + "We couldn't do the join locally, maybe federation can help to \ + satisfy the restricted join requirements" + ); + let Ok((make_join_response, remote_server)) = + make_join_request(sender_user, room_id, servers).await + else { + return Err(error); + }; let room_version_id = match make_join_response.room_version { - Some(room_version) + Some(room_version_id) if services() .globals .supported_room_versions() - .contains(&room_version) => + .contains(&room_version_id) => { - room_version + room_version_id + } + _ => { + return Err(Error::BadServerResponse( + "Room version is not supported", + )) } - _ => return Err(Error::BadServerResponse("Room version is not supported")), }; + let room_version_rules = room_version_id + .rules() + .expect("ruma should support all room versions we advertise"); 
- let mut join_event_stub: CanonicalJsonObject = - serde_json::from_str(make_join_response.event.get()).map_err(|_| { - Error::BadServerResponse("Invalid make_join event json received from server.") - })?; - + let mut join_event_stub: CanonicalJsonObject = serde_json::from_str( + make_join_response.event.get(), + ) + .map_err(|_| { + Error::BadServerResponse( + "Invalid make_join event json received from server.", + ) + })?; let join_authorized_via_users_server = join_event_stub .get("content") .map(|s| { - s.as_object()? - .get("join_authorised_via_users_server")? - .as_str() + s.as_object()?.get("join_authorised_via_users_server")?.as_str() }) .and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok()); - // TODO: Is origin needed? join_event_stub.insert( "origin".to_owned(), - CanonicalJsonValue::String(services().globals.server_name().as_str().to_owned()), + CanonicalJsonValue::String( + services().globals.server_name().as_str().to_owned(), + ), ); join_event_stub.insert( "origin_server_ts".to_owned(), @@ -609,23 +711,186 @@ async fn join_room_by_id_helper( .expect("event is valid, we just created it"), ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms + // We don't leave the event id in the pdu because that's only allowed in + // v1 or v2 rooms join_event_stub.remove("event_id"); - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present + // In order to create a compatible ref hash (EventID) the `hashes` field + // needs to be present ruma::signatures::hash_and_sign_event( services().globals.server_name().as_str(), services().globals.keypair(), &mut join_event_stub, - &room_version_id, + &room_version_rules.redaction, ) .expect("event is valid, we just created it"); // Generate event id let event_id = format!( "${}", - ruma::signatures::reference_hash(&join_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") + ruma::signatures::reference_hash( + 
&join_event_stub, + &room_version_rules + ) + .expect("ruma can calculate reference hashes") + ); + let event_id = <&EventId>::try_from(event_id.as_str()) + .expect("ruma's reference hashes are valid event ids"); + + // Add event_id back + join_event_stub.insert( + "event_id".to_owned(), + CanonicalJsonValue::String(event_id.as_str().to_owned()), + ); + + // It has enough fields to be called a proper event now + let join_event = join_event_stub; + + let send_join_response = services() + .sending + .send_federation_request( + &remote_server, + federation::membership::create_join_event::v2::Request { + room_id: room_id.to_owned(), + event_id: event_id.to_owned(), + pdu: PduEvent::convert_to_outgoing_federation_event( + join_event.clone(), + ), + omit_members: false, + }, + ) + .await?; + + let Some(signed_raw) = send_join_response.room_state.event else { + return Err(error); + }; + + let Ok((signed_event_id, signed_value)) = + gen_event_id_canonical_json(&signed_raw, &room_version_rules) + else { + // Event could not be converted to canonical json + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not convert event to canonical json.", + )); + }; + + if signed_event_id != event_id { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Server sent event with wrong event id", + )); + } + + drop(room_token); + let pub_key_map = RwLock::new(BTreeMap::new()); + services() + .rooms + .event_handler + .handle_incoming_pdu( + &remote_server, + &signed_event_id, + room_id, + signed_value, + true, + &pub_key_map, + ) + .await?; + } else { + info!("Joining over federation."); + + let (make_join_response, remote_server) = + make_join_request(sender_user, room_id, servers).await?; + + info!("make_join finished"); + + let room_version_id = match make_join_response.room_version { + Some(room_version) + if services() + .globals + .supported_room_versions() + .contains(&room_version) => + { + room_version + } + _ => { + return Err(Error::BadServerResponse( + 
"Room version is not supported", + )) + } + }; + let room_version_rules = room_version_id + .rules() + .expect("ruma should support all room versions we advertise"); + + let mut join_event_stub: CanonicalJsonObject = serde_json::from_str( + make_join_response.event.get(), + ) + .map_err(|_| { + Error::BadServerResponse( + "Invalid make_join event json received from server.", + ) + })?; + + let join_authorized_via_users_server = join_event_stub + .get("content") + .map(|s| { + s.as_object()?.get("join_authorised_via_users_server")?.as_str() + }) + .and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok()); + + // TODO: Is origin needed? + join_event_stub.insert( + "origin".to_owned(), + CanonicalJsonValue::String( + services().globals.server_name().as_str().to_owned(), + ), + ); + join_event_stub.insert( + "origin_server_ts".to_owned(), + CanonicalJsonValue::Integer( + utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + ), + ); + join_event_stub.insert( + "content".to_owned(), + to_canonical_value(RoomMemberEventContent { + membership: MembershipState::Join, + displayname: services().users.displayname(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, + is_direct: None, + third_party_invite: None, + blurhash: services().users.blurhash(sender_user)?, + reason, + join_authorized_via_users_server, + }) + .expect("event is valid, we just created it"), + ); + + // We don't leave the event id in the pdu because that's only allowed in + // v1 or v2 rooms + join_event_stub.remove("event_id"); + + // In order to create a compatible ref hash (EventID) the `hashes` field + // needs to be present + ruma::signatures::hash_and_sign_event( + services().globals.server_name().as_str(), + services().globals.keypair(), + &mut join_event_stub, + &room_version_rules.redaction, + ) + .expect("event is valid, we just created it"); + + // Generate event id + let event_id = format!( + "${}", + 
ruma::signatures::reference_hash( + &join_event_stub, + &room_version_rules + ) + .expect("ruma can calculate reference hashes") ); let event_id = <&EventId>::try_from(event_id.as_str()) .expect("ruma's reference hashes are valid event ids"); @@ -639,7 +904,7 @@ async fn join_room_by_id_helper( // It has enough fields to be called a proper event now let mut join_event = join_event_stub; - info!("Asking {remote_server} for send_join"); + info!(server = %remote_server, "Asking other server for send_join"); let send_join_response = services() .sending .send_federation_request( @@ -647,7 +912,9 @@ async fn join_room_by_id_helper( federation::membership::create_join_event::v2::Request { room_id: room_id.to_owned(), event_id: event_id.to_owned(), - pdu: PduEvent::convert_to_outgoing_federation_event(join_event.clone()), + pdu: PduEvent::convert_to_outgoing_federation_event( + join_event.clone(), + ), omit_members: false, }, ) @@ -656,18 +923,19 @@ async fn join_room_by_id_helper( info!("send_join finished"); if let Some(signed_raw) = &send_join_response.room_state.event { - info!("There is a signed event. This room is probably using restricted joins. Adding signature to our event"); - let (signed_event_id, signed_value) = - match gen_event_id_canonical_json(signed_raw, &room_version_id) { - Ok(t) => t, - Err(_) => { - // Event could not be converted to canonical json - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not convert event to canonical json.", - )); - } - }; + info!( + "There is a signed event. This room is probably using \ + restricted joins. 
Adding signature to our event" + ); + let Ok((signed_event_id, signed_value)) = + gen_event_id_canonical_json(signed_raw, &room_version_rules) + else { + // Event could not be converted to canonical json + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not convert event to canonical json.", + )); + }; if signed_event_id != event_id { return Err(Error::BadRequest( @@ -696,9 +964,13 @@ async fn join_room_by_id_helper( .expect("we created a valid pdu") .insert(remote_server.to_string(), signature.clone()); } - Err(e) => { + Err(error) => { warn!( - "Server {remote_server} sent invalid signature in sendjoin signatures for event {signed_value:?}: {e:?}", + %error, + server = %remote_server, + event = ?signed_value, + "Other server sent invalid signature in sendjoin \ + signatures for event", ); } } @@ -707,8 +979,10 @@ async fn join_room_by_id_helper( services().rooms.short.get_or_create_shortroomid(room_id)?; info!("Parsing join event"); - let parsed_join_pdu = PduEvent::from_id_val(event_id, join_event.clone()) - .map_err(|_| Error::BadServerResponse("Invalid join event PDU."))?; + let parsed_join_pdu = + PduEvent::from_id_val(event_id, join_event.clone()).map_err( + |_| Error::BadServerResponse("Invalid join event PDU."), + )?; let mut state = HashMap::new(); let pub_key_map = RwLock::new(BTreeMap::new()); @@ -717,62 +991,66 @@ async fn join_room_by_id_helper( services() .rooms .event_handler - .fetch_join_signing_keys(&send_join_response, &room_version_id, &pub_key_map) + .fetch_join_signing_keys( + &send_join_response, + &room_version_rules, + &pub_key_map, + ) .await?; info!("Going through send_join response room_state"); - for result in send_join_response - .room_state - .state - .iter() - .map(|pdu| validate_and_add_event_id(pdu, &room_version_id, &pub_key_map)) - { - let (event_id, value) = match result.await { - Ok(t) => t, - Err(_) => continue, + for result in send_join_response.room_state.state.iter().map(|pdu| { + 
validate_and_add_event_id(pdu, &room_version_rules, &pub_key_map) + }) { + let Ok((event_id, value)) = result.await else { + continue; }; - let pdu = PduEvent::from_id_val(&event_id, value.clone()).map_err(|e| { - warn!("Invalid PDU in send_join response: {} {:?}", e, value); - Error::BadServerResponse("Invalid PDU in send_join response.") - })?; + let pdu = PduEvent::from_id_val(&event_id, value.clone()).map_err( + |error| { + warn!( + %error, + object = ?value, + "Invalid PDU in send_join response", + ); + Error::BadServerResponse( + "Invalid PDU in send_join response.", + ) + }, + )?; - services() - .rooms - .outlier - .add_pdu_outlier(&event_id, &value)?; + services().rooms.outlier.add_pdu_outlier(&event_id, &value)?; if let Some(state_key) = &pdu.state_key { - let shortstatekey = services() - .rooms - .short - .get_or_create_shortstatekey(&pdu.kind.to_string().into(), state_key)?; + let shortstatekey = + services().rooms.short.get_or_create_shortstatekey( + &pdu.kind.to_string().into(), + state_key, + )?; state.insert(shortstatekey, pdu.event_id.clone()); } } info!("Going through send_join response auth_chain"); - for result in send_join_response - .room_state - .auth_chain - .iter() - .map(|pdu| validate_and_add_event_id(pdu, &room_version_id, &pub_key_map)) + for result in + send_join_response.room_state.auth_chain.iter().map(|pdu| { + validate_and_add_event_id( + pdu, + &room_version_rules, + &pub_key_map, + ) + }) { - let (event_id, value) = match result.await { - Ok(t) => t, - Err(_) => continue, + let Ok((event_id, value)) = result.await else { + continue; }; - services() - .rooms - .outlier - .add_pdu_outlier(&event_id, &value)?; + services().rooms.outlier.add_pdu_outlier(&event_id, &value)?; } info!("Running send_join auth check"); - let authenticated = state_res::event_auth::auth_check( - &state_res::RoomVersion::new(&room_version_id).expect("room version is supported"), + state_res::event_auth::auth_check( + &room_version_rules.authorization, 
&parsed_join_pdu, - None::, // TODO: third party invite |k, s| { services() .rooms @@ -782,53 +1060,52 @@ async fn join_room_by_id_helper( &services() .rooms .short - .get_or_create_shortstatekey(&k.to_string().into(), s) + .get_or_create_shortstatekey( + &k.to_string().into(), + s, + ) .ok()?, )?, ) .ok()? }, ) - .map_err(|e| { - warn!("Auth check failed: {e}"); + .map_err(|error| { + warn!(%error, "Auth check failed"); Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed") })?; - if !authenticated { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Auth check failed", - )); - } - info!("Saving state from send_join"); - let (statehash_before_join, new, removed) = services().rooms.state_compressor.save_state( - room_id, - Arc::new( - state - .into_iter() - .map(|(k, id)| { - services() - .rooms - .state_compressor - .compress_state_event(k, &id) - }) - .collect::>()?, - ), - )?; + let (statehash_before_join, new, removed) = + services().rooms.state_compressor.save_state( + room_id, + Arc::new( + state + .into_iter() + .map(|(k, id)| { + services() + .rooms + .state_compressor + .compress_state_event(k, &id) + }) + .collect::>()?, + ), + )?; services() .rooms .state - .force_state(room_id, statehash_before_join, new, removed, &state_lock) + .force_state(&room_token, statehash_before_join, new, removed) .await?; info!("Updating joined counts for new room"); services().rooms.state_cache.update_joined_count(room_id)?; - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. - let statehash_after_join = services().rooms.state.append_to_state(&parsed_join_pdu)?; + // We append to state before appending the pdu, so we don't have a + // moment in time with the pdu without it's state. This is okay + // because append_pdu can't fail. 
+ let statehash_after_join = + services().rooms.state.append_to_state(&parsed_join_pdu)?; info!("Appending new room join event"); services() @@ -838,263 +1115,18 @@ async fn join_room_by_id_helper( &parsed_join_pdu, join_event, vec![(*parsed_join_pdu.event_id).to_owned()], - &state_lock, + &room_token, ) .await?; info!("Setting final room state for new room"); - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist + // We set the room state after inserting the pdu, so that we never have + // a moment in time where events in the current room state do + // not exist services() .rooms .state - .set_room_state(room_id, statehash_after_join, &state_lock)?; - } else { - info!("We can join locally"); - - let join_rules_event = services().rooms.state_accessor.room_state_get( - room_id, - &StateEventType::RoomJoinRules, - "", - )?; - - let join_rules_event_content: Option = join_rules_event - .as_ref() - .map(|join_rules_event| { - serde_json::from_str(join_rules_event.content.get()).map_err(|e| { - warn!("Invalid join rules event: {}", e); - Error::bad_database("Invalid join rules event in db.") - }) - }) - .transpose()?; - - let restriction_rooms = match join_rules_event_content { - Some(RoomJoinRulesEventContent { - join_rule: JoinRule::Restricted(restricted), - }) - | Some(RoomJoinRulesEventContent { - join_rule: JoinRule::KnockRestricted(restricted), - }) => restricted - .allow - .into_iter() - .filter_map(|a| match a { - AllowRule::RoomMembership(r) => Some(r.room_id), - _ => None, - }) - .collect(), - _ => Vec::new(), - }; - - let authorized_user = if restriction_rooms.iter().any(|restriction_room_id| { - services() - .rooms - .state_cache - .is_joined(sender_user, restriction_room_id) - .unwrap_or(false) - }) { - let mut auth_user = None; - for user in services() - .rooms - .state_cache - .room_members(room_id) - .filter_map(Result::ok) - .collect::>() - { - if 
user.server_name() == services().globals.server_name() - && services() - .rooms - .state_accessor - .user_can_invite(room_id, &user, sender_user, &state_lock) - .await - .unwrap_or(false) - { - auth_user = Some(user); - break; - } - } - auth_user - } else { - None - }; - - let event = RoomMemberEventContent { - membership: MembershipState::Join, - displayname: services().users.displayname(sender_user)?, - avatar_url: services().users.avatar_url(sender_user)?, - is_direct: None, - third_party_invite: None, - blurhash: services().users.blurhash(sender_user)?, - reason: reason.clone(), - join_authorized_via_users_server: authorized_user, - }; - - // Try normal join first - let error = match services() - .rooms - .timeline - .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_user.to_string()), - redacts: None, - }, - sender_user, - room_id, - &state_lock, - ) - .await - { - Ok(_event_id) => return Ok(join_room_by_id::v3::Response::new(room_id.to_owned())), - Err(e) => e, - }; - - if !restriction_rooms.is_empty() - && servers - .iter() - .any(|s| *s != services().globals.server_name()) - { - info!( - "We couldn't do the join locally, maybe federation can help to satisfy the restricted join requirements" - ); - let (make_join_response, remote_server) = - make_join_request(sender_user, room_id, servers).await?; - - let room_version_id = match make_join_response.room_version { - Some(room_version_id) - if services() - .globals - .supported_room_versions() - .contains(&room_version_id) => - { - room_version_id - } - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - let mut join_event_stub: CanonicalJsonObject = - serde_json::from_str(make_join_response.event.get()).map_err(|_| { - Error::BadServerResponse("Invalid make_join event json received from server.") - })?; - let 
join_authorized_via_users_server = join_event_stub - .get("content") - .map(|s| { - s.as_object()? - .get("join_authorised_via_users_server")? - .as_str() - }) - .and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok()); - // TODO: Is origin needed? - join_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(services().globals.server_name().as_str().to_owned()), - ); - join_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - join_event_stub.insert( - "content".to_owned(), - to_canonical_value(RoomMemberEventContent { - membership: MembershipState::Join, - displayname: services().users.displayname(sender_user)?, - avatar_url: services().users.avatar_url(sender_user)?, - is_direct: None, - third_party_invite: None, - blurhash: services().users.blurhash(sender_user)?, - reason, - join_authorized_via_users_server, - }) - .expect("event is valid, we just created it"), - ); - - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - join_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - services().globals.server_name().as_str(), - services().globals.keypair(), - &mut join_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = format!( - "${}", - ruma::signatures::reference_hash(&join_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - ); - let event_id = <&EventId>::try_from(event_id.as_str()) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - join_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let 
join_event = join_event_stub; - - let send_join_response = services() - .sending - .send_federation_request( - &remote_server, - federation::membership::create_join_event::v2::Request { - room_id: room_id.to_owned(), - event_id: event_id.to_owned(), - pdu: PduEvent::convert_to_outgoing_federation_event(join_event.clone()), - omit_members: false, - }, - ) - .await?; - - if let Some(signed_raw) = send_join_response.room_state.event { - let (signed_event_id, signed_value) = - match gen_event_id_canonical_json(&signed_raw, &room_version_id) { - Ok(t) => t, - Err(_) => { - // Event could not be converted to canonical json - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not convert event to canonical json.", - )); - } - }; - - if signed_event_id != event_id { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Server sent event with wrong event id", - )); - } - - drop(state_lock); - let pub_key_map = RwLock::new(BTreeMap::new()); - services() - .rooms - .event_handler - .handle_incoming_pdu( - &remote_server, - &signed_event_id, - room_id, - signed_value, - true, - &pub_key_map, - ) - .await?; - } else { - return Err(error); - } - } else { - return Err(error); - } + .set_room_state(&room_token, statehash_after_join)?; } Ok(join_room_by_id::v3::Response::new(room_id.to_owned())) @@ -1116,7 +1148,7 @@ async fn make_join_request( if remote_server == services().globals.server_name() { continue; } - info!("Asking {remote_server} for make_join"); + info!(server = %remote_server, "Asking other server for make_join"); let make_join_response = services() .sending .send_federation_request( @@ -1129,10 +1161,18 @@ async fn make_join_request( ) .await; - make_join_response_and_server = make_join_response.map(|r| (r, remote_server.clone())); - - if make_join_response_and_server.is_ok() { - break; + match make_join_response { + Ok(r) => { + return Ok((r, remote_server.clone())); + } + Err(error) => { + warn!( + %error, + server = %remote_server, + "Remote 
join request failed", + ); + make_join_response_and_server = Err(error); + } } } @@ -1141,57 +1181,89 @@ async fn make_join_request( async fn validate_and_add_event_id( pdu: &RawJsonValue, - room_version: &RoomVersionId, - pub_key_map: &RwLock>>, + room_version_rules: &RoomVersionRules, + pub_key_map: &RwLock>, ) -> Result<(OwnedEventId, CanonicalJsonObject)> { - let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { - error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); - Error::BadServerResponse("Invalid PDU in server response") - })?; + let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()) + .map_err(|error| { + error!(%error, object = ?pdu, "Invalid PDU in server response"); + Error::BadServerResponse("Invalid PDU in server response") + })?; let event_id = EventId::parse(format!( "${}", - ruma::signatures::reference_hash(&value, room_version) + ruma::signatures::reference_hash(&value, room_version_rules) .expect("ruma can calculate reference hashes") )) .expect("ruma's reference hashes are valid event ids"); let back_off = |id| async { - match services() - .globals - .bad_event_ratelimiter - .write() - .await - .entry(id) - { + match services().globals.bad_event_ratelimiter.write().await.entry(id) { Entry::Vacant(e) => { e.insert((Instant::now(), 1)); } - Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), + Entry::Occupied(mut e) => { + *e.get_mut() = (Instant::now(), e.get().1 + 1); + } } }; - if let Some((time, tries)) = services() - .globals - .bad_event_ratelimiter - .read() - .await - .get(&event_id) + if let Some((time, tries)) = + services().globals.bad_event_ratelimiter.read().await.get(&event_id) { // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); + let mut min_elapsed_duration = + Duration::from_secs(30) * (*tries) * (*tries); if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { min_elapsed_duration = 
Duration::from_secs(60 * 60 * 24); } if time.elapsed() < min_elapsed_duration { - debug!("Backing off from {}", event_id); - return Err(Error::BadServerResponse("bad event, still backing off")); + debug!(%event_id, "Backing off from event"); + return Err(Error::BadServerResponse( + "bad event, still backing off", + )); } } - if let Err(e) = ruma::signatures::verify_event(&*pub_key_map.read().await, &value, room_version) + let origin_server_ts = value.get("origin_server_ts").ok_or_else(|| { + error!("Invalid PDU, no origin_server_ts field"); + Error::BadRequest( + ErrorKind::MissingParam, + "Invalid PDU, no origin_server_ts field", + ) + })?; + + let origin_server_ts: MilliSecondsSinceUnixEpoch = { + let ts = origin_server_ts.as_integer().ok_or_else(|| { + Error::BadRequest( + ErrorKind::InvalidParam, + "origin_server_ts must be an integer", + ) + })?; + + MilliSecondsSinceUnixEpoch(i64::from(ts).try_into().map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Time must be after the unix epoch", + ) + })?) 
+ }; + + let unfiltered_keys = (*pub_key_map.read().await).clone(); + + let keys = services() + .globals + .filter_keys_server_map(unfiltered_keys, origin_server_ts); + + if let Err(error) = + ruma::signatures::verify_event(&keys, &value, room_version_rules) { - warn!("Event {} failed verification {:?} {}", event_id, pdu, e); + warn!( + %event_id, + %error, + ?pdu, + "Event failed verification", + ); back_off(event_id).await; return Err(Error::BadServerResponse("Event failed verification.")); } @@ -1204,7 +1276,8 @@ async fn validate_and_add_event_id( Ok((event_id, value)) } -pub(crate) async fn invite_helper<'a>( +#[allow(clippy::too_many_lines)] +pub(crate) async fn invite_helper( sender_user: &UserId, user_id: &UserId, room_id: &RoomId, @@ -1213,16 +1286,11 @@ pub(crate) async fn invite_helper<'a>( ) -> Result<()> { if user_id.server_name() != services().globals.server_name() { let (pdu, pdu_json, invite_room_state) = { - let mutex_state = Arc::clone( - services() - .globals - .roomid_mutex_state - .write() - .await - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; + let room_token = services() + .globals + .roomid_mutex_state + .lock_key(room_id.to_owned()) + .await; let content = to_raw_value(&RoomMemberEventContent { avatar_url: None, @@ -1236,27 +1304,34 @@ pub(crate) async fn invite_helper<'a>( }) .expect("member event is valid value"); - let (pdu, pdu_json) = services().rooms.timeline.create_hash_and_sign_event( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content, - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - sender_user, - room_id, - &state_lock, - )?; + let (pdu, pdu_json) = + services().rooms.timeline.create_hash_and_sign_event( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content, + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + sender_user, + &room_token, + )?; - let invite_room_state = 
services().rooms.state.calculate_invite_state(&pdu)?; + let invite_room_state = + services().rooms.state.get_helpful_invite_events(&pdu)?; - drop(state_lock); + drop(room_token); (pdu, pdu_json, invite_room_state) }; - let room_version_id = services().rooms.state.get_room_version(room_id)?; + let room_version_id = services() + .rooms + .state + .get_create_content::(room_id)?; + let Some(room_version_rules) = room_version_id.rules() else { + return Err(Error::UnsupportedRoomVersion(room_version_id)); + }; let response = services() .sending @@ -1266,7 +1341,9 @@ pub(crate) async fn invite_helper<'a>( room_id: room_id.to_owned(), event_id: (*pdu.event_id).to_owned(), room_version: room_version_id.clone(), - event: PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()), + event: PduEvent::convert_to_outgoing_federation_event( + pdu_json.clone(), + ), invite_room_state, }, ) @@ -1274,36 +1351,55 @@ pub(crate) async fn invite_helper<'a>( let pub_key_map = RwLock::new(BTreeMap::new()); - // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match gen_event_id_canonical_json(&response.event, &room_version_id) - { - Ok(t) => t, - Err(_) => { - // Event could not be converted to canonical json - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not convert event to canonical json.", - )); - } + // We do not add the event_id field to the pdu here because of signature + // and hashes checks + let Ok((event_id, value)) = + gen_event_id_canonical_json(&response.event, &room_version_rules) + else { + // Event could not be converted to canonical json + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not convert event to canonical json.", + )); }; if *pdu.event_id != *event_id { - warn!("Server {} changed invite event, that's not allowed in the spec: ours: {:?}, theirs: {:?}", user_id.server_name(), pdu_json, value); + warn!( + server = %user_id.server_name(), + our_object = 
?pdu_json, + their_object = ?value, + "Other server changed invite event, that's not allowed in the \ + spec", + ); } let origin: OwnedServerName = serde_json::from_value( - serde_json::to_value(value.get("origin").ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Event needs an origin field.", - ))?) + serde_json::to_value(value.get("origin").ok_or( + Error::BadRequest( + ErrorKind::InvalidParam, + "Event needs an origin field.", + ), + )?) .expect("CanonicalJson is valid json value"), ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; + .map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Origin field is invalid.", + ) + })?; - let pdu_id: Vec = services() + let pdu_id = services() .rooms .event_handler - .handle_incoming_pdu(&origin, &event_id, room_id, value, true, &pub_key_map) + .handle_incoming_pdu( + &origin, + &event_id, + room_id, + value, + true, + &pub_key_map, + ) .await? .ok_or(Error::BadRequest( ErrorKind::InvalidParam, @@ -1315,7 +1411,7 @@ pub(crate) async fn invite_helper<'a>( .rooms .state_cache .room_servers(room_id) - .filter_map(|r| r.ok()) + .filter_map(Result::ok) .filter(|server| &**server != services().globals.server_name()); services().sending.send_pdu(servers, &pdu_id)?; @@ -1323,27 +1419,18 @@ pub(crate) async fn invite_helper<'a>( return Ok(()); } - if !services() - .rooms - .state_cache - .is_joined(sender_user, room_id)? - { + if !services().rooms.state_cache.is_joined(sender_user, room_id)? 
{ return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "You don't have permission to view this room.", )); } - let mutex_state = Arc::clone( - services() - .globals - .roomid_mutex_state - .write() - .await - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; + let room_token = services() + .globals + .roomid_mutex_state + .lock_key(room_id.to_owned()) + .await; services() .rooms @@ -1367,18 +1454,17 @@ pub(crate) async fn invite_helper<'a>( redacts: None, }, sender_user, - room_id, - &state_lock, + &room_token, ) .await?; - drop(state_lock); + drop(room_token); Ok(()) } // Make a user leave all their joined rooms -pub async fn leave_all_rooms(user_id: &UserId) -> Result<()> { +pub(crate) async fn leave_all_rooms(user_id: &UserId) -> Result<()> { let all_rooms = services() .rooms .state_cache @@ -1393,26 +1479,88 @@ pub async fn leave_all_rooms(user_id: &UserId) -> Result<()> { .collect::>(); for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - Err(_) => continue, + let Ok(room_id) = room_id else { + continue; }; - let _ = leave_room(user_id, &room_id, None).await; + if let Err(error) = leave_room(user_id, &room_id, None).await { + warn!(%user_id, %room_id, %error, "Failed to leave room"); + } } Ok(()) } -pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option) -> Result<()> { +#[tracing::instrument(skip(reason))] +pub(crate) async fn leave_room( + user_id: &UserId, + room_id: &RoomId, + reason: Option, +) -> Result<()> { // Ask a remote server if we don't have this room - if !services() + if services() .rooms .state_cache .server_in_room(services().globals.server_name(), room_id)? 
{ - if let Err(e) = remote_leave_room(user_id, room_id).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); + let room_token = services() + .globals + .roomid_mutex_state + .lock_key(room_id.to_owned()) + .await; + + let member_event = services().rooms.state_accessor.room_state_get( + room_id, + &StateEventType::RoomMember, + user_id.as_str(), + )?; + + // Fix for broken rooms + let member_event = match member_event { + None => { + error!("Trying to leave a room you are not a member of."); + + services().rooms.state_cache.update_membership( + room_id, + user_id, + &MembershipState::Leave, + user_id, + None, + true, + )?; + return Ok(()); + } + Some(e) => e, + }; + + let mut event: RoomMemberEventContent = + serde_json::from_str(member_event.content.get()).map_err(|_| { + Error::bad_database("Invalid member event in database.") + })?; + + event.membership = MembershipState::Leave; + event.reason = reason; + event.join_authorized_via_users_server = None; + + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content: to_raw_value(&event) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + user_id, + &room_token, + ) + .await?; + } else { + if let Err(error) = remote_leave_room(user_id, room_id).await { + warn!(%error, "Failed to leave room remotely"); // Don't tell the client about this error } @@ -1429,94 +1577,32 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option { - error!("Trying to leave a room you are not a member of."); - - services().rooms.state_cache.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - None, - true, - )?; - return Ok(()); - } - Some(e) => e, - }; - - let mut event: RoomMemberEventContent = serde_json::from_str(member_event.content.get()) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - 
event.membership = MembershipState::Leave; - event.reason = reason; - event.join_authorized_via_users_server = None; - - services() - .rooms - .timeline - .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - &state_lock, - ) - .await?; } Ok(()) } +#[allow(clippy::too_many_lines)] async fn remote_leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> { let mut make_leave_response_and_server = Err(Error::BadServerResponse( "No server available to assist in leaving.", )); - let invite_state = services() - .rooms - .state_cache - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; + let invite_state = + services().rooms.state_cache.invite_state(user_id, room_id)?.ok_or( + Error::BadRequest(ErrorKind::BadState, "User is not invited."), + )?; let servers: HashSet<_> = invite_state .iter() .filter_map(|event| serde_json::from_str(event.json().get()).ok()) .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) + .filter_map(|sender| sender.as_str().map(ToOwned::to_owned)) .filter_map(|sender| UserId::parse(sender).ok()) .map(|user| user.server_name().to_owned()) .collect(); @@ -1533,7 +1619,8 @@ async fn remote_leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> { ) .await; - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); + make_leave_response_and_server = + make_leave_response.map(|r| (r, remote_server)); if make_leave_response_and_server.is_ok() { break; @@ -1551,18 +1638,31 @@ async fn remote_leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> { { version } - _ => return Err(Error::BadServerResponse("Room version is not supported")), + _ => { + return 
Err(Error::BadServerResponse( + "Room version is not supported", + )) + } }; + let room_version_rules = room_version_id + .rules() + .expect("ruma should support all room versions we advertise"); let mut leave_event_stub = serde_json::from_str::( make_leave_response.event.get(), ) - .map_err(|_| Error::BadServerResponse("Invalid make_leave event json received from server."))?; + .map_err(|_| { + Error::BadServerResponse( + "Invalid make_leave event json received from server.", + ) + })?; // TODO: Is origin needed? leave_event_stub.insert( "origin".to_owned(), - CanonicalJsonValue::String(services().globals.server_name().as_str().to_owned()), + CanonicalJsonValue::String( + services().globals.server_name().as_str().to_owned(), + ), ); leave_event_stub.insert( "origin_server_ts".to_owned(), @@ -1572,23 +1672,28 @@ async fn remote_leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> { .expect("Timestamp is valid js_int value"), ), ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms + // We don't leave the event id in the pdu because that's only allowed in v1 + // or v2 rooms leave_event_stub.remove("event_id"); - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present + // In order to create a compatible ref hash (EventID) the `hashes` field + // needs to be present ruma::signatures::hash_and_sign_event( services().globals.server_name().as_str(), services().globals.keypair(), &mut leave_event_stub, - &room_version_id, + &room_version_rules.redaction, ) .expect("event is valid, we just created it"); // Generate event id let event_id = EventId::parse(format!( "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") + ruma::signatures::reference_hash( + &leave_event_stub, + &room_version_rules + ) + .expect("ruma can calculate reference hashes") )) .expect("ruma's reference hashes are valid event ids"); @@ -1608,7 
+1713,9 @@ async fn remote_leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> { federation::membership::create_leave_event::v2::Request { room_id: room_id.to_owned(), event_id, - pdu: PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), + pdu: PduEvent::convert_to_outgoing_federation_event( + leave_event.clone(), + ), }, ) .await?; diff --git a/src/api/client_server/message.rs b/src/api/client_server/message.rs index 89f33591..df723d07 100644 --- a/src/api/client_server/message.rs +++ b/src/api/client_server/message.rs @@ -1,77 +1,82 @@ -use crate::{ - service::{pdu::PduBuilder, rooms::timeline::PduCount}, - services, utils, Error, Result, Ruma, -}; +use std::collections::{BTreeMap, HashSet}; + use ruma::{ api::client::{ error::ErrorKind, message::{get_message_events, send_message_event}, }, events::{StateEventType, TimelineEventType}, + uint, }; -use std::{ - collections::{BTreeMap, HashSet}, - sync::Arc, + +use crate::{ + service::{pdu::PduBuilder, rooms::timeline::PduCount}, + services, utils, Ar, Error, Ra, Result, }; /// # `PUT /_matrix/client/r0/rooms/{roomId}/send/{eventType}/{txnId}` /// /// Send a message event into the room. 
/// -/// - Is a NOOP if the txn id was already used before and returns the same event id again +/// - Is a NOOP if the txn id was already used before and returns the same event +/// id again /// - The only requirement for the content is that it has to be valid json -/// - Tries to send the event into the room, auth rules will determine if it is allowed -pub async fn send_message_event_route( - body: Ruma, -) -> Result { +/// - Tries to send the event into the room, auth rules will determine if it is +/// allowed +pub(crate) async fn send_message_event_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); - let mutex_state = Arc::clone( - services() - .globals - .roomid_mutex_state - .write() - .await - .entry(body.room_id.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; + let room_token = services() + .globals + .roomid_mutex_state + .lock_key(body.room_id.clone()) + .await; // Forbid m.room.encrypted if encryption is disabled if TimelineEventType::RoomEncrypted == body.event_type.to_string().into() && !services().globals.allow_encryption() { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Encryption has been disabled", )); } // Check if this is a new transaction id - if let Some(response) = - services() - .transaction_ids - .existing_txnid(sender_user, sender_device, &body.txn_id)? - { + if let Some(response) = services().transaction_ids.existing_txnid( + sender_user, + sender_device, + &body.txn_id, + )? 
{ // The client might have sent a txnid of the /sendToDevice endpoint // This txnid has no response associated with it if response.is_empty() { return Err(Error::BadRequest( ErrorKind::InvalidParam, - "Tried to use txn id already used for an incompatible endpoint.", + "Tried to use txn id already used for an incompatible \ + endpoint.", )); } let event_id = utils::string_from_bytes(&response) - .map_err(|_| Error::bad_database("Invalid txnid bytes in database."))? + .map_err(|_| { + Error::bad_database("Invalid txnid bytes in database.") + })? .try_into() - .map_err(|_| Error::bad_database("Invalid event id in txnid data."))?; - return Ok(send_message_event::v3::Response { event_id }); + .map_err(|_| { + Error::bad_database("Invalid event id in txnid data.") + })?; + return Ok(Ra(send_message_event::v3::Response { + event_id, + })); } let mut unsigned = BTreeMap::new(); - unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into()); + unsigned + .insert("transaction_id".to_owned(), body.txn_id.to_string().into()); let event_id = services() .rooms @@ -80,14 +85,18 @@ pub async fn send_message_event_route( PduBuilder { event_type: body.event_type.to_string().into(), content: serde_json::from_str(body.body.body.json().get()) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?, + .map_err(|_| { + Error::BadRequest( + ErrorKind::BadJson, + "Invalid JSON body.", + ) + })?, unsigned: Some(unsigned), state_key: None, redacts: None, }, sender_user, - &body.room_id, - &state_lock, + &room_token, ) .await?; @@ -98,45 +107,51 @@ pub async fn send_message_event_route( event_id.as_bytes(), )?; - drop(state_lock); + drop(room_token); - Ok(send_message_event::v3::Response::new( - (*event_id).to_owned(), - )) + Ok(Ra(send_message_event::v3::Response::new((*event_id).to_owned()))) } /// # `GET /_matrix/client/r0/rooms/{roomId}/messages` /// /// Allows paginating through room history. 
/// -/// - Only works if the user is joined (TODO: always allow, but only show events where the user was -/// joined, depending on history_visibility) -pub async fn get_message_events_route( - body: Ruma, -) -> Result { +/// - Only works if the user is joined (TODO: always allow, but only show events +/// where the user was joined, depending on `history_visibility`) +#[allow(clippy::too_many_lines)] +pub(crate) async fn get_message_events_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + let sender_device = + body.sender_device.as_ref().expect("user is authenticated"); let from = match body.from.clone() { Some(from) => PduCount::try_from_string(&from)?, None => match body.dir { - ruma::api::Direction::Forward => PduCount::min(), - ruma::api::Direction::Backward => PduCount::max(), + ruma::api::Direction::Forward => PduCount::MIN, + ruma::api::Direction::Backward => PduCount::MAX, }, }; - let to = body - .to - .as_ref() - .and_then(|t| PduCount::try_from_string(t).ok()); + let to = body.to.as_ref().and_then(|t| PduCount::try_from_string(t).ok()); services() .rooms .lazy_loading - .lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from) + .lazy_load_confirm_delivery( + sender_user, + sender_device, + &body.room_id, + from, + ) .await?; - let limit = u64::from(body.limit).min(100) as usize; + let limit = body + .limit + .min(uint!(100)) + .try_into() + .expect("0-100 should fit in usize"); let next_token; @@ -151,21 +166,27 @@ pub async fn get_message_events_route( .timeline .pdus_after(sender_user, &body.room_id, from)? 
.take(limit) - .filter_map(|r| r.ok()) // Filter out buggy events + .filter_map(Result::ok) .filter(|(_, pdu)| { services() .rooms .state_accessor - .user_can_see_event(sender_user, &body.room_id, &pdu.event_id) + .user_can_see_event( + sender_user, + &body.room_id, + &pdu.event_id, + ) .unwrap_or(false) }) - .take_while(|&(k, _)| Some(k) != to) // Stop at `to` + .take_while(|&(k, _)| Some(k) != to) .collect(); for (_, event) in &events_after { - /* TODO: Remove this when these are resolved: - * https://github.com/vector-im/element-android/issues/3417 - * https://github.com/vector-im/element-web/issues/21034 + // * https://github.com/vector-im/element-android/issues/3417 + // * https://github.com/vector-im/element-web/issues/21034 + // + // TODO: When the above issues are resolved, uncomment this: + /* if !services().rooms.lazy_loading.lazy_load_was_sent_before( sender_user, sender_device, @@ -175,6 +196,7 @@ pub async fn get_message_events_route( lazy_loaded.insert(event.sender.clone()); } */ + // And delete this line: lazy_loaded.insert(event.sender.clone()); } @@ -200,21 +222,27 @@ pub async fn get_message_events_route( .timeline .pdus_until(sender_user, &body.room_id, from)? 
.take(limit) - .filter_map(|r| r.ok()) // Filter out buggy events + .filter_map(Result::ok) .filter(|(_, pdu)| { services() .rooms .state_accessor - .user_can_see_event(sender_user, &body.room_id, &pdu.event_id) + .user_can_see_event( + sender_user, + &body.room_id, + &pdu.event_id, + ) .unwrap_or(false) }) - .take_while(|&(k, _)| Some(k) != to) // Stop at `to` + .take_while(|&(k, _)| Some(k) != to) .collect(); for (_, event) in &events_before { - /* TODO: Remove this when these are resolved: - * https://github.com/vector-im/element-android/issues/3417 - * https://github.com/vector-im/element-web/issues/21034 + // * https://github.com/vector-im/element-android/issues/3417 + // * https://github.com/vector-im/element-web/issues/21034 + // + // TODO: When the above issues are resolved, uncomment this: + /* if !services().rooms.lazy_loading.lazy_load_was_sent_before( sender_user, sender_device, @@ -224,6 +252,7 @@ pub async fn get_message_events_route( lazy_loaded.insert(event.sender.clone()); } */ + // And delete this line: lazy_loaded.insert(event.sender.clone()); } @@ -242,16 +271,21 @@ pub async fn get_message_events_route( resp.state = Vec::new(); for ll_id in &lazy_loaded { - if let Some(member_event) = services().rooms.state_accessor.room_state_get( - &body.room_id, - &StateEventType::RoomMember, - ll_id.as_str(), - )? { + if let Some(member_event) = + services().rooms.state_accessor.room_state_get( + &body.room_id, + &StateEventType::RoomMember, + ll_id.as_str(), + )? 
+ { resp.state.push(member_event.to_state_event()); } } - // TODO: enable again when we are sure clients can handle it + // * https://github.com/vector-im/element-android/issues/3417 + // * https://github.com/vector-im/element-web/issues/21034 + // + // TODO: When the above issues are resolved, uncomment this: /* if let Some(next_token) = next_token { services().rooms.lazy_loading.lazy_load_mark_sent( @@ -264,5 +298,5 @@ pub async fn get_message_events_route( } */ - Ok(resp) + Ok(Ra(resp)) } diff --git a/src/api/client_server/mod.rs b/src/api/client_server/mod.rs deleted file mode 100644 index 54c99aa0..00000000 --- a/src/api/client_server/mod.rs +++ /dev/null @@ -1,74 +0,0 @@ -mod account; -mod alias; -mod backup; -mod capabilities; -mod config; -mod context; -mod device; -mod directory; -mod filter; -mod keys; -mod media; -mod membership; -mod message; -mod presence; -mod profile; -mod push; -mod read_marker; -mod redact; -mod relations; -mod report; -mod room; -mod search; -mod session; -mod space; -mod state; -mod sync; -mod tag; -mod thirdparty; -mod threads; -mod to_device; -mod typing; -mod unversioned; -mod user_directory; -mod voip; - -pub use account::*; -pub use alias::*; -pub use backup::*; -pub use capabilities::*; -pub use config::*; -pub use context::*; -pub use device::*; -pub use directory::*; -pub use filter::*; -pub use keys::*; -pub use media::*; -pub use membership::*; -pub use message::*; -pub use presence::*; -pub use profile::*; -pub use push::*; -pub use read_marker::*; -pub use redact::*; -pub use relations::*; -pub use report::*; -pub use room::*; -pub use search::*; -pub use session::*; -pub use space::*; -pub use state::*; -pub use sync::*; -pub use tag::*; -pub use thirdparty::*; -pub use threads::*; -pub use to_device::*; -pub use typing::*; -pub use unversioned::*; -pub use user_directory::*; -pub use voip::*; - -pub const DEVICE_ID_LENGTH: usize = 10; -pub const TOKEN_LENGTH: usize = 32; -pub const SESSION_ID_LENGTH: usize = 32; 
-pub const AUTO_GEN_PASSWORD_LENGTH: usize = 15; diff --git a/src/api/client_server/presence.rs b/src/api/client_server/presence.rs deleted file mode 100644 index e5cd1b8e..00000000 --- a/src/api/client_server/presence.rs +++ /dev/null @@ -1,90 +0,0 @@ -use crate::{services, utils, Error, Result, Ruma}; -use ruma::api::client::{ - error::ErrorKind, - presence::{get_presence, set_presence}, -}; -use std::time::Duration; - -/// # `PUT /_matrix/client/r0/presence/{userId}/status` -/// -/// Sets the presence state of the sender user. -pub async fn set_presence_route( - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - for room_id in services().rooms.state_cache.rooms_joined(sender_user) { - let room_id = room_id?; - - services().rooms.edus.presence.update_presence( - sender_user, - &room_id, - ruma::events::presence::PresenceEvent { - content: ruma::events::presence::PresenceEventContent { - avatar_url: services().users.avatar_url(sender_user)?, - currently_active: None, - displayname: services().users.displayname(sender_user)?, - last_active_ago: Some( - utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - ), - presence: body.presence.clone(), - status_msg: body.status_msg.clone(), - }, - sender: sender_user.clone(), - }, - )?; - } - - Ok(set_presence::v3::Response {}) -} - -/// # `GET /_matrix/client/r0/presence/{userId}/status` -/// -/// Gets the presence state of the given user. -/// -/// - Only works if you share a room with the user -pub async fn get_presence_route( - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let mut presence_event = None; - - for room_id in services() - .rooms - .user - .get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()])? - { - let room_id = room_id?; - - if let Some(presence) = services() - .rooms - .edus - .presence - .get_last_presence_event(sender_user, &room_id)? 
- { - presence_event = Some(presence); - break; - } - } - - if let Some(presence) = presence_event { - Ok(get_presence::v3::Response { - // TODO: Should ruma just use the presenceeventcontent type here? - status_msg: presence.content.status_msg, - currently_active: presence.content.currently_active, - last_active_ago: presence - .content - .last_active_ago - .map(|millis| Duration::from_millis(millis.into())), - presence: presence.content.presence, - }) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "Presence state for this user was not found", - )) - } -} diff --git a/src/api/client_server/profile.rs b/src/api/client_server/profile.rs index cf1db2d7..8a84f743 100644 --- a/src/api/client_server/profile.rs +++ b/src/api/client_server/profile.rs @@ -1,39 +1,41 @@ -use crate::{service::pdu::PduBuilder, services, utils, Error, Result, Ruma}; use ruma::{ api::{ client::{ error::ErrorKind, profile::{ - get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name, + get_avatar_url, get_display_name, get_profile, set_avatar_url, + set_display_name, }, }, federation::{self, query::get_profile_information::v1::ProfileField}, }, - events::{room::member::RoomMemberEventContent, StateEventType, TimelineEventType}, + events::{ + room::member::RoomMemberEventContent, StateEventType, TimelineEventType, + }, }; use serde_json::value::to_raw_value; -use std::sync::Arc; +use tracing::warn; + +use crate::{service::pdu::PduBuilder, services, Ar, Error, Ra, Result}; /// # `PUT /_matrix/client/r0/profile/{userId}/displayname` /// /// Updates the displayname. 
/// /// - Also makes sure other users receive the update using presence EDUs -pub async fn set_displayname_route( - body: Ruma, -) -> Result { +pub(crate) async fn set_displayname_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services() - .users - .set_displayname(sender_user, body.displayname.clone())?; + services().users.set_displayname(sender_user, body.displayname.clone())?; // Send a new membership event and presence update into all joined rooms let all_rooms_joined: Vec<_> = services() .rooms .state_cache .rooms_joined(sender_user) - .filter_map(|r| r.ok()) + .filter_map(Result::ok) .map(|room_id| { Ok::<_, Error>(( PduBuilder { @@ -52,14 +54,18 @@ pub async fn set_displayname_route( )? .ok_or_else(|| { Error::bad_database( - "Tried to send displayname update for user not in the \ - room.", + "Tried to send displayname update for \ + user not in the room.", ) })? .content .get(), ) - .map_err(|_| Error::bad_database("Database contains invalid PDU."))? + .map_err(|_| { + Error::bad_database( + "Database contains invalid PDU.", + ) + })? 
}) .expect("event is valid, we just created it"), unsigned: None, @@ -69,50 +75,27 @@ pub async fn set_displayname_route( room_id, )) }) - .filter_map(|r| r.ok()) + .filter_map(Result::ok) .collect(); for (pdu_builder, room_id) in all_rooms_joined { - let mutex_state = Arc::clone( - services() - .globals - .roomid_mutex_state - .write() - .await - .entry(room_id.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let _ = services() - .rooms - .timeline - .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock) + let room_token = services() + .globals + .roomid_mutex_state + .lock_key(room_id.clone()) .await; - // Presence update - services().rooms.edus.presence.update_presence( - sender_user, - &room_id, - ruma::events::presence::PresenceEvent { - content: ruma::events::presence::PresenceEventContent { - avatar_url: services().users.avatar_url(sender_user)?, - currently_active: None, - displayname: services().users.displayname(sender_user)?, - last_active_ago: Some( - utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - ), - presence: ruma::presence::PresenceState::Online, - status_msg: None, - }, - sender: sender_user.clone(), - }, - )?; + if let Err(error) = services() + .rooms + .timeline + .build_and_append_pdu(pdu_builder, sender_user, &room_token) + .await + { + warn!(%error, "failed to add PDU"); + } } - Ok(set_display_name::v3::Response {}) + Ok(Ra(set_display_name::v3::Response {})) } /// # `GET /_matrix/client/r0/profile/{userId}/displayname` @@ -120,9 +103,9 @@ pub async fn set_displayname_route( /// Returns the displayname of the user. 
/// /// - If user is on another server: Fetches displayname over federation -pub async fn get_displayname_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_displayname_route( + body: Ar, +) -> Result> { if body.user_id.server_name() != services().globals.server_name() { let response = services() .sending @@ -135,40 +118,36 @@ pub async fn get_displayname_route( ) .await?; - return Ok(get_display_name::v3::Response { + return Ok(Ra(get_display_name::v3::Response { displayname: response.displayname, - }); + })); } - Ok(get_display_name::v3::Response { + Ok(Ra(get_display_name::v3::Response { displayname: services().users.displayname(&body.user_id)?, - }) + })) } /// # `PUT /_matrix/client/r0/profile/{userId}/avatar_url` /// -/// Updates the avatar_url and blurhash. +/// Updates the `avatar_url` and `blurhash`. /// /// - Also makes sure other users receive the update using presence EDUs -pub async fn set_avatar_url_route( - body: Ruma, -) -> Result { +pub(crate) async fn set_avatar_url_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services() - .users - .set_avatar_url(sender_user, body.avatar_url.clone())?; + services().users.set_avatar_url(sender_user, body.avatar_url.clone())?; - services() - .users - .set_blurhash(sender_user, body.blurhash.clone())?; + services().users.set_blurhash(sender_user, body.blurhash.clone())?; // Send a new membership event and presence update into all joined rooms let all_joined_rooms: Vec<_> = services() .rooms .state_cache .rooms_joined(sender_user) - .filter_map(|r| r.ok()) + .filter_map(Result::ok) .map(|room_id| { Ok::<_, Error>(( PduBuilder { @@ -187,14 +166,18 @@ pub async fn set_avatar_url_route( )? .ok_or_else(|| { Error::bad_database( - "Tried to send displayname update for user not in the \ - room.", + "Tried to send displayname update for \ + user not in the room.", ) })? 
.content .get(), ) - .map_err(|_| Error::bad_database("Database contains invalid PDU."))? + .map_err(|_| { + Error::bad_database( + "Database contains invalid PDU.", + ) + })? }) .expect("event is valid, we just created it"), unsigned: None, @@ -204,60 +187,38 @@ pub async fn set_avatar_url_route( room_id, )) }) - .filter_map(|r| r.ok()) + .filter_map(Result::ok) .collect(); for (pdu_builder, room_id) in all_joined_rooms { - let mutex_state = Arc::clone( - services() - .globals - .roomid_mutex_state - .write() - .await - .entry(room_id.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let _ = services() - .rooms - .timeline - .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock) + let room_token = services() + .globals + .roomid_mutex_state + .lock_key(room_id.clone()) .await; - // Presence update - services().rooms.edus.presence.update_presence( - sender_user, - &room_id, - ruma::events::presence::PresenceEvent { - content: ruma::events::presence::PresenceEventContent { - avatar_url: services().users.avatar_url(sender_user)?, - currently_active: None, - displayname: services().users.displayname(sender_user)?, - last_active_ago: Some( - utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - ), - presence: ruma::presence::PresenceState::Online, - status_msg: None, - }, - sender: sender_user.clone(), - }, - )?; + if let Err(error) = services() + .rooms + .timeline + .build_and_append_pdu(pdu_builder, sender_user, &room_token) + .await + { + warn!(%error, "failed to add PDU"); + }; } - Ok(set_avatar_url::v3::Response {}) + Ok(Ra(set_avatar_url::v3::Response {})) } /// # `GET /_matrix/client/r0/profile/{userId}/avatar_url` /// -/// Returns the avatar_url and blurhash of the user. +/// Returns the `avatar_url` and `blurhash` of the user. 
/// -/// - If user is on another server: Fetches avatar_url and blurhash over federation -pub async fn get_avatar_url_route( - body: Ruma, -) -> Result { +/// - If user is on another server: Fetches `avatar_url` and `blurhash` over +/// federation +pub(crate) async fn get_avatar_url_route( + body: Ar, +) -> Result> { if body.user_id.server_name() != services().globals.server_name() { let response = services() .sending @@ -270,26 +231,26 @@ pub async fn get_avatar_url_route( ) .await?; - return Ok(get_avatar_url::v3::Response { + return Ok(Ra(get_avatar_url::v3::Response { avatar_url: response.avatar_url, blurhash: response.blurhash, - }); + })); } - Ok(get_avatar_url::v3::Response { + Ok(Ra(get_avatar_url::v3::Response { avatar_url: services().users.avatar_url(&body.user_id)?, blurhash: services().users.blurhash(&body.user_id)?, - }) + })) } /// # `GET /_matrix/client/r0/profile/{userId}` /// -/// Returns the displayname, avatar_url and blurhash of the user. +/// Returns the `displayname`, `avatar_url` and `blurhash` of the user. /// /// - If user is on another server: Fetches profile over federation -pub async fn get_profile_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_profile_route( + body: Ar, +) -> Result> { if body.user_id.server_name() != services().globals.server_name() { let response = services() .sending @@ -302,11 +263,11 @@ pub async fn get_profile_route( ) .await?; - return Ok(get_profile::v3::Response { + return Ok(Ra(get_profile::v3::Response { displayname: response.displayname, avatar_url: response.avatar_url, blurhash: response.blurhash, - }); + })); } if !services().users.exists(&body.user_id)? 
{ @@ -317,9 +278,9 @@ pub async fn get_profile_route( )); } - Ok(get_profile::v3::Response { + Ok(Ra(get_profile::v3::Response { avatar_url: services().users.avatar_url(&body.user_id)?, blurhash: services().users.blurhash(&body.user_id)?, displayname: services().users.displayname(&body.user_id)?, - }) + })) } diff --git a/src/api/client_server/push.rs b/src/api/client_server/push.rs index 72768662..258766db 100644 --- a/src/api/client_server/push.rs +++ b/src/api/client_server/push.rs @@ -1,69 +1,63 @@ -use crate::{services, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, push::{ - delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, get_pushrule_enabled, - get_pushrules_all, set_pusher, set_pushrule, set_pushrule_actions, - set_pushrule_enabled, RuleScope, + delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, + get_pushrule_enabled, get_pushrules_all, set_pusher, set_pushrule, + set_pushrule_actions, set_pushrule_enabled, }, }, - events::{push_rules::PushRulesEvent, GlobalAccountDataEventType}, - push::{InsertPushRuleError, RemovePushRuleError}, + events::push_rules::PushRulesEventContent, + push::{AnyPushRuleRef, InsertPushRuleError, RemovePushRuleError}, + serde::Raw, }; +use crate::{services, Ar, Error, Ra, Result}; + /// # `GET /_matrix/client/r0/pushrules` /// /// Retrieves the push rules event for this user. -pub async fn get_pushrules_all_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_pushrules_all_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event = services() .account_data - .get( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? + .get_global::(sender_user)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", ))?; - let account_data = serde_json::from_str::(event.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db."))? 
- .content; + let account_data = event.deserialize().map_err(|_| { + Error::bad_database("Invalid account data event in db.") + })?; - Ok(get_pushrules_all::v3::Response { + Ok(Ra(get_pushrules_all::v3::Response { global: account_data.global, - }) + })) } /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` /// /// Retrieves a single specified push rule for this user. -pub async fn get_pushrule_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_pushrule_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event = services() .account_data - .get( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? + .get_global::(sender_user)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", ))?; - let account_data = serde_json::from_str::(event.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db."))? - .content; + let account_data = event.deserialize().map_err(|_| { + Error::bad_database("Invalid account data event in db.") + })?; let rule = account_data .global @@ -71,47 +65,36 @@ pub async fn get_pushrule_route( .map(Into::into); if let Some(rule) = rule { - Ok(get_pushrule::v3::Response { rule }) + Ok(Ra(get_pushrule::v3::Response { + rule, + })) } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "Push rule not found.", - )) + Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.")) } } /// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` /// /// Creates a single specified push rule for this user. 
-pub async fn set_pushrule_route( - body: Ruma, -) -> Result { +pub(crate) async fn set_pushrule_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let body = body.body; - if body.scope != RuleScope::Global { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Scopes other than 'global' are not supported.", - )); - } - let event = services() .account_data - .get( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? + .get_global::(sender_user)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", ))?; - let mut account_data = serde_json::from_str::(event.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + let mut account_data = event.deserialize().map_err(|_| { + Error::bad_database("Invalid account data event in db.") + })?; - if let Err(error) = account_data.content.global.insert( + if let Err(error) = account_data.global.insert( body.rule.clone(), body.after.as_deref(), body.before.as_deref(), @@ -119,16 +102,20 @@ pub async fn set_pushrule_route( let err = match error { InsertPushRuleError::ServerDefaultRuleId => Error::BadRequest( ErrorKind::InvalidParam, - "Rule IDs starting with a dot are reserved for server-default rules.", + "Rule IDs starting with a dot are reserved for server-default \ + rules.", ), InsertPushRuleError::InvalidRuleId => Error::BadRequest( ErrorKind::InvalidParam, "Rule ID containing invalid characters.", ), - InsertPushRuleError::RelativeToServerDefaultRule => Error::BadRequest( - ErrorKind::InvalidParam, - "Can't place a push rule relatively to a server-default rule.", - ), + InsertPushRuleError::RelativeToServerDefaultRule => { + Error::BadRequest( + ErrorKind::InvalidParam, + "Can't place a push rule relatively to a server-default \ + rule.", + ) + } InsertPushRuleError::UnknownRuleId => Error::BadRequest( ErrorKind::NotFound, "The before or after rule could not be found.", 
@@ -143,46 +130,34 @@ pub async fn set_pushrule_route( return Err(err); } - services().account_data.update( - None, + services().account_data.update_global( sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(account_data).expect("to json value always works"), + &Raw::new(&account_data) + .expect("json event serialization should always succeed"), )?; - Ok(set_pushrule::v3::Response {}) + Ok(Ra(set_pushrule::v3::Response {})) } /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions` /// /// Gets the actions of a single specified push rule for this user. -pub async fn get_pushrule_actions_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_pushrule_actions_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if body.scope != RuleScope::Global { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Scopes other than 'global' are not supported.", - )); - } - let event = services() .account_data - .get( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? + .get_global::(sender_user)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", ))?; - let account_data = serde_json::from_str::(event.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db."))? - .content; + let account_data = event.deserialize().map_err(|_| { + Error::bad_database("Invalid account data event in db.") + })?; let global = account_data.global; let actions = global @@ -193,41 +168,32 @@ pub async fn get_pushrule_actions_route( "Push rule not found.", ))?; - Ok(get_pushrule_actions::v3::Response { actions }) + Ok(Ra(get_pushrule_actions::v3::Response { + actions, + })) } /// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions` /// /// Sets the actions of a single specified push rule for this user. 
-pub async fn set_pushrule_actions_route( - body: Ruma, -) -> Result { +pub(crate) async fn set_pushrule_actions_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if body.scope != RuleScope::Global { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Scopes other than 'global' are not supported.", - )); - } - let event = services() .account_data - .get( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? + .get_global::(sender_user)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", ))?; - let mut account_data = serde_json::from_str::(event.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + let mut account_data = event.deserialize().map_err(|_| { + Error::bad_database("Invalid account data event in db.") + })?; if account_data - .content .global .set_actions(body.kind.clone(), &body.rule_id, body.actions.clone()) .is_err() @@ -238,90 +204,70 @@ pub async fn set_pushrule_actions_route( )); } - services().account_data.update( - None, + services().account_data.update_global( sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(account_data).expect("to json value always works"), + &Raw::new(&account_data) + .expect("json event serialization should always succeed"), )?; - Ok(set_pushrule_actions::v3::Response {}) + Ok(Ra(set_pushrule_actions::v3::Response {})) } /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled` /// /// Gets the enabled status of a single specified push rule for this user. 
-pub async fn get_pushrule_enabled_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_pushrule_enabled_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if body.scope != RuleScope::Global { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Scopes other than 'global' are not supported.", - )); - } - let event = services() .account_data - .get( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? + .get_global::(sender_user)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", ))?; - let account_data = serde_json::from_str::(event.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + let account_data = event.deserialize().map_err(|_| { + Error::bad_database("Invalid account data event in db.") + })?; - let global = account_data.content.global; + let global = account_data.global; let enabled = global .get(body.kind.clone(), &body.rule_id) - .map(|r| r.enabled()) + .map(AnyPushRuleRef::enabled) .ok_or(Error::BadRequest( ErrorKind::NotFound, "Push rule not found.", ))?; - Ok(get_pushrule_enabled::v3::Response { enabled }) + Ok(Ra(get_pushrule_enabled::v3::Response { + enabled, + })) } /// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled` /// /// Sets the enabled status of a single specified push rule for this user. -pub async fn set_pushrule_enabled_route( - body: Ruma, -) -> Result { +pub(crate) async fn set_pushrule_enabled_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if body.scope != RuleScope::Global { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Scopes other than 'global' are not supported.", - )); - } - let event = services() .account_data - .get( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? + .get_global::(sender_user)? 
.ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", ))?; - let mut account_data = serde_json::from_str::(event.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + let mut account_data = event.deserialize().map_err(|_| { + Error::bad_database("Invalid account data event in db.") + })?; if account_data - .content .global .set_enabled(body.kind.clone(), &body.rule_id, body.enabled) .is_err() @@ -332,50 +278,37 @@ pub async fn set_pushrule_enabled_route( )); } - services().account_data.update( - None, + services().account_data.update_global( sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(account_data).expect("to json value always works"), + &Raw::new(&account_data) + .expect("json event serialization should always succeed"), )?; - Ok(set_pushrule_enabled::v3::Response {}) + Ok(Ra(set_pushrule_enabled::v3::Response {})) } /// # `DELETE /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` /// /// Deletes a single specified push rule for this user. -pub async fn delete_pushrule_route( - body: Ruma, -) -> Result { +pub(crate) async fn delete_pushrule_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if body.scope != RuleScope::Global { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Scopes other than 'global' are not supported.", - )); - } - let event = services() .account_data - .get( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? + .get_global::(sender_user)? 
.ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", ))?; - let mut account_data = serde_json::from_str::(event.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + let mut account_data = event.deserialize().map_err(|_| { + Error::bad_database("Invalid account data event in db.") + })?; - if let Err(error) = account_data - .content - .global - .remove(body.kind.clone(), &body.rule_id) + if let Err(error) = + account_data.global.remove(body.kind.clone(), &body.rule_id) { let err = match error { RemovePushRuleError::ServerDefault => Error::BadRequest( @@ -391,27 +324,26 @@ pub async fn delete_pushrule_route( return Err(err); } - services().account_data.update( - None, + services().account_data.update_global( sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(account_data).expect("to json value always works"), + &Raw::new(&account_data) + .expect("json event serialization should always succeed"), )?; - Ok(delete_pushrule::v3::Response {}) + Ok(Ra(delete_pushrule::v3::Response {})) } /// # `GET /_matrix/client/r0/pushers` /// /// Gets all currently active pushers for the sender user. -pub async fn get_pushers_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_pushers_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - Ok(get_pushers::v3::Response { + Ok(Ra(get_pushers::v3::Response { pushers: services().pusher.get_pushers(sender_user)?, - }) + })) } /// # `POST /_matrix/client/r0/pushers/set` @@ -419,14 +351,12 @@ pub async fn get_pushers_route( /// Adds a pusher for the sender user. 
/// /// - TODO: Handle `append` -pub async fn set_pushers_route( - body: Ruma, -) -> Result { +pub(crate) async fn set_pushers_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services() - .pusher - .set_pusher(sender_user, body.action.clone())?; + services().pusher.set_pusher(sender_user, body.action.clone())?; - Ok(set_pusher::v3::Response::default()) + Ok(Ra(set_pusher::v3::Response::default())) } diff --git a/src/api/client_server/read_marker.rs b/src/api/client_server/read_marker.rs index a5553d25..dc83de60 100644 --- a/src/api/client_server/read_marker.rs +++ b/src/api/client_server/read_marker.rs @@ -1,36 +1,40 @@ -use crate::{service::rooms::timeline::PduCount, services, Error, Result, Ruma}; +use std::collections::BTreeMap; + use ruma::{ - api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt}, - events::{ - receipt::{ReceiptThread, ReceiptType}, - RoomAccountDataEventType, + api::client::{ + error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt, }, + events::receipt::{ReceiptThread, ReceiptType}, + serde::Raw, MilliSecondsSinceUnixEpoch, }; -use std::collections::BTreeMap; + +use crate::{ + service::rooms::timeline::PduCount, services, Ar, Error, Ra, Result, +}; /// # `POST /_matrix/client/r0/rooms/{roomId}/read_markers` /// /// Sets different types of read markers. 
/// /// - Updates fully-read account data event to `fully_read` -/// - If `read_receipt` is set: Update private marker and public read receipt EDU -pub async fn set_read_marker_route( - body: Ruma, -) -> Result { +/// - If `read_receipt` is set: Update private marker and public read receipt +/// EDU +pub(crate) async fn set_read_marker_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if let Some(fully_read) = &body.fully_read { - let fully_read_event = ruma::events::fully_read::FullyReadEvent { - content: ruma::events::fully_read::FullyReadEventContent { + let fully_read_event = + ruma::events::fully_read::FullyReadEventContent { event_id: fully_read.clone(), - }, - }; - services().account_data.update( - Some(&body.room_id), + }; + services().account_data.update_room( + &body.room_id, sender_user, - RoomAccountDataEventType::FullyRead, - &serde_json::to_value(fully_read_event).expect("to json value always works"), + &Raw::new(&fully_read_event) + .expect("json event serialization should always succeed"), )?; } @@ -42,14 +46,9 @@ pub async fn set_read_marker_route( } if let Some(event) = &body.private_read_receipt { - let count = services() - .rooms - .timeline - .get_pdu_count(event)? 
- .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Event does not exist.", - ))?; + let count = services().rooms.timeline.get_pdu_count(event)?.ok_or( + Error::BadRequest(ErrorKind::InvalidParam, "Event does not exist."), + )?; let count = match count { PduCount::Backfilled(_) => { return Err(Error::BadRequest( @@ -59,11 +58,11 @@ pub async fn set_read_marker_route( } PduCount::Normal(c) => c, }; - services() - .rooms - .edus - .read_receipt - .private_read_set(&body.room_id, sender_user, count)?; + services().rooms.edus.read_receipt.private_read_set( + &body.room_id, + sender_user, + count, + )?; } if let Some(event) = &body.read_receipt { @@ -86,26 +85,32 @@ pub async fn set_read_marker_route( sender_user, &body.room_id, ruma::events::receipt::ReceiptEvent { - content: ruma::events::receipt::ReceiptEventContent(receipt_content), + content: ruma::events::receipt::ReceiptEventContent( + receipt_content, + ), room_id: body.room_id.clone(), }, )?; + for server in services().rooms.state_cache.room_servers(&body.room_id) { + services().sending.trigger_edu_send(&server?)?; + } } - Ok(set_read_marker::v3::Response {}) + Ok(Ra(set_read_marker::v3::Response {})) } /// # `POST /_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}` /// /// Sets private read marker and public read receipt EDU. 
-pub async fn create_receipt_route( - body: Ruma, -) -> Result { +pub(crate) async fn create_receipt_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if matches!( &body.receipt_type, - create_receipt::v3::ReceiptType::Read | create_receipt::v3::ReceiptType::ReadPrivate + create_receipt::v3::ReceiptType::Read + | create_receipt::v3::ReceiptType::ReadPrivate ) { services() .rooms @@ -115,16 +120,15 @@ pub async fn create_receipt_route( match body.receipt_type { create_receipt::v3::ReceiptType::FullyRead => { - let fully_read_event = ruma::events::fully_read::FullyReadEvent { - content: ruma::events::fully_read::FullyReadEventContent { + let fully_read_event = + ruma::events::fully_read::FullyReadEventContent { event_id: body.event_id.clone(), - }, - }; - services().account_data.update( - Some(&body.room_id), + }; + services().account_data.update_room( + &body.room_id, sender_user, - RoomAccountDataEventType::FullyRead, - &serde_json::to_value(fully_read_event).expect("to json value always works"), + &Raw::new(&fully_read_event) + .expect("json event serialization should always succeed"), )?; } create_receipt::v3::ReceiptType::Read => { @@ -140,16 +144,23 @@ pub async fn create_receipt_route( receipts.insert(ReceiptType::Read, user_receipts); let mut receipt_content = BTreeMap::new(); - receipt_content.insert(body.event_id.to_owned(), receipts); + receipt_content.insert(body.event_id.clone(), receipts); services().rooms.edus.read_receipt.readreceipt_update( sender_user, &body.room_id, ruma::events::receipt::ReceiptEvent { - content: ruma::events::receipt::ReceiptEventContent(receipt_content), + content: ruma::events::receipt::ReceiptEventContent( + receipt_content, + ), room_id: body.room_id.clone(), }, )?; + for server in + services().rooms.state_cache.room_servers(&body.room_id) + { + services().sending.trigger_edu_send(&server?)?; + } } create_receipt::v3::ReceiptType::ReadPrivate => { let count = 
services() @@ -178,5 +189,5 @@ pub async fn create_receipt_route( _ => return Err(Error::bad_database("Unsupported receipt type")), } - Ok(create_receipt::v3::Response {}) + Ok(Ra(create_receipt::v3::Response {})) } diff --git a/src/api/client_server/redact.rs b/src/api/client_server/redact.rs index f0603f4b..46af6d8c 100644 --- a/src/api/client_server/redact.rs +++ b/src/api/client_server/redact.rs @@ -1,34 +1,27 @@ -use std::sync::Arc; - -use crate::{service::pdu::PduBuilder, services, Result, Ruma}; use ruma::{ api::client::redact::redact_event, events::{room::redaction::RoomRedactionEventContent, TimelineEventType}, }; - use serde_json::value::to_raw_value; +use crate::{service::pdu::PduBuilder, services, Ar, Ra, Result}; + /// # `PUT /_matrix/client/r0/rooms/{roomId}/redact/{eventId}/{txnId}` /// /// Tries to send a redaction event into the room. /// /// - TODO: Handle txn id -pub async fn redact_event_route( - body: Ruma, -) -> Result { +pub(crate) async fn redact_event_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let body = body.body; - let mutex_state = Arc::clone( - services() - .globals - .roomid_mutex_state - .write() - .await - .entry(body.room_id.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; + let room_token = services() + .globals + .roomid_mutex_state + .lock_key(body.room_id.clone()) + .await; let event_id = services() .rooms @@ -46,13 +39,14 @@ pub async fn redact_event_route( redacts: Some(body.event_id.into()), }, sender_user, - &body.room_id, - &state_lock, + &room_token, ) .await?; - drop(state_lock); + drop(room_token); let event_id = (*event_id).to_owned(); - Ok(redact_event::v3::Response { event_id }) + Ok(Ra(redact_event::v3::Response { + event_id, + })) } diff --git a/src/api/client_server/relations.rs b/src/api/client_server/relations.rs index 124f1310..c40224c9 100644 --- a/src/api/client_server/relations.rs +++ 
b/src/api/client_server/relations.rs @@ -1,134 +1,128 @@ -use ruma::api::client::relations::{ - get_relating_events, get_relating_events_with_rel_type, - get_relating_events_with_rel_type_and_event_type, +use ruma::{ + api::client::relations::{ + get_relating_events, get_relating_events_with_rel_type, + get_relating_events_with_rel_type_and_event_type, + }, + uint, }; -use crate::{service::rooms::timeline::PduCount, services, Result, Ruma}; +use crate::{service::rooms::timeline::PduCount, services, Ar, Ra, Result}; /// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}` -pub async fn get_relating_events_with_rel_type_and_event_type_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_relating_events_with_rel_type_and_event_type_route( + body: Ar, +) -> Result> +{ let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let from = match body.from.clone() { Some(from) => PduCount::try_from_string(&from)?, None => match ruma::api::Direction::Backward { // TODO: fix ruma so `body.dir` exists - ruma::api::Direction::Forward => PduCount::min(), - ruma::api::Direction::Backward => PduCount::max(), + ruma::api::Direction::Forward => PduCount::MIN, + ruma::api::Direction::Backward => PduCount::MAX, }, }; - let to = body - .to - .as_ref() - .and_then(|t| PduCount::try_from_string(t).ok()); + let to = body.to.as_ref().and_then(|t| PduCount::try_from_string(t).ok()); // Use limit or else 10, with maximum 100 let limit = body .limit - .and_then(|u| u32::try_from(u).ok()) - .map_or(10_usize, |u| u as usize) - .min(100); + .map(|x| x.min(uint!(100))) + .unwrap_or(uint!(10)) + .try_into() + .expect("0-100 should fit in usize"); - let res = services() - .rooms - .pdu_metadata - .paginate_relations_with_filter( - sender_user, - &body.room_id, - &body.event_id, - Some(body.event_type.clone()), - Some(body.rel_type.clone()), - from, - to, - limit, - )?; + let res = services().rooms.pdu_metadata.paginate_relations_with_filter( 
+ sender_user, + &body.room_id, + &body.event_id, + Some(&body.event_type), + Some(&body.rel_type), + from, + to, + limit, + )?; - Ok( - get_relating_events_with_rel_type_and_event_type::v1::Response { - chunk: res.chunk, - next_batch: res.next_batch, - prev_batch: res.prev_batch, - }, - ) -} - -/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}` -pub async fn get_relating_events_with_rel_type_route( - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let from = match body.from.clone() { - Some(from) => PduCount::try_from_string(&from)?, - None => match ruma::api::Direction::Backward { - // TODO: fix ruma so `body.dir` exists - ruma::api::Direction::Forward => PduCount::min(), - ruma::api::Direction::Backward => PduCount::max(), - }, - }; - - let to = body - .to - .as_ref() - .and_then(|t| PduCount::try_from_string(t).ok()); - - // Use limit or else 10, with maximum 100 - let limit = body - .limit - .and_then(|u| u32::try_from(u).ok()) - .map_or(10_usize, |u| u as usize) - .min(100); - - let res = services() - .rooms - .pdu_metadata - .paginate_relations_with_filter( - sender_user, - &body.room_id, - &body.event_id, - None, - Some(body.rel_type.clone()), - from, - to, - limit, - )?; - - Ok(get_relating_events_with_rel_type::v1::Response { + Ok(Ra(get_relating_events_with_rel_type_and_event_type::v1::Response { chunk: res.chunk, next_batch: res.next_batch, prev_batch: res.prev_batch, - }) + // TODO + recursion_depth: None, + })) } -/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}` -pub async fn get_relating_events_route( - body: Ruma, -) -> Result { +/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}` +pub(crate) async fn get_relating_events_with_rel_type_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let from = match body.from.clone() { Some(from) => 
PduCount::try_from_string(&from)?, None => match ruma::api::Direction::Backward { // TODO: fix ruma so `body.dir` exists - ruma::api::Direction::Forward => PduCount::min(), - ruma::api::Direction::Backward => PduCount::max(), + ruma::api::Direction::Forward => PduCount::MIN, + ruma::api::Direction::Backward => PduCount::MAX, }, }; - let to = body - .to - .as_ref() - .and_then(|t| PduCount::try_from_string(t).ok()); + let to = body.to.as_ref().and_then(|t| PduCount::try_from_string(t).ok()); // Use limit or else 10, with maximum 100 let limit = body .limit - .and_then(|u| u32::try_from(u).ok()) - .map_or(10_usize, |u| u as usize) - .min(100); + .map(|x| x.min(uint!(100))) + .unwrap_or(uint!(10)) + .try_into() + .expect("0-100 should fit in usize"); + + let res = services().rooms.pdu_metadata.paginate_relations_with_filter( + sender_user, + &body.room_id, + &body.event_id, + None, + Some(&body.rel_type), + from, + to, + limit, + )?; + + Ok(Ra(get_relating_events_with_rel_type::v1::Response { + chunk: res.chunk, + next_batch: res.next_batch, + prev_batch: res.prev_batch, + // TODO + recursion_depth: None, + })) +} + +/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}` +pub(crate) async fn get_relating_events_route( + body: Ar, +) -> Result> { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let from = match body.from.clone() { + Some(from) => PduCount::try_from_string(&from)?, + None => match ruma::api::Direction::Backward { + // TODO: fix ruma so `body.dir` exists + ruma::api::Direction::Forward => PduCount::MIN, + ruma::api::Direction::Backward => PduCount::MAX, + }, + }; + + let to = body.to.as_ref().and_then(|t| PduCount::try_from_string(t).ok()); + + // Use limit or else 10, with maximum 100 + let limit = body + .limit + .map(|x| x.min(uint!(100))) + .unwrap_or(uint!(10)) + .try_into() + .expect("0-100 should fit in usize"); services() .rooms @@ -143,4 +137,5 @@ pub async fn get_relating_events_route( to, limit, ) + 
.map(Ra) } diff --git a/src/api/client_server/report.rs b/src/api/client_server/report.rs index ab5027cd..e822f7ae 100644 --- a/src/api/client_server/report.rs +++ b/src/api/client_server/report.rs @@ -1,27 +1,24 @@ -use crate::{services, utils::HtmlEscape, Error, Result, Ruma}; use ruma::{ api::client::{error::ErrorKind, room::report_content}, events::room::message, int, }; +use crate::{services, Ar, Error, Ra, Result}; + /// # `POST /_matrix/client/r0/rooms/{roomId}/report/{eventId}` /// /// Reports an inappropriate event to homeserver admins -/// -pub async fn report_event_route( - body: Ruma, -) -> Result { +pub(crate) async fn report_event_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let pdu = match services().rooms.timeline.get_pdu(&body.event_id)? { - Some(pdu) => pdu, - _ => { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid Event ID", - )) - } + let Some(pdu) = services().rooms.timeline.get_pdu(&body.event_id)? else { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid Event ID", + )); }; if let Some(true) = body.score.map(|s| s > int!(0) || s < int!(-100)) { @@ -29,41 +26,70 @@ pub async fn report_event_route( ErrorKind::InvalidParam, "Invalid score, must be within 0 to -100", )); - }; + } if let Some(true) = body.reason.clone().map(|s| s.chars().count() > 250) { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Reason too long, should be 250 characters or fewer", )); - }; + } - services().admin - .send_message(message::RoomMessageEventContent::text_html( - format!( - "Report received from: {}\n\n\ - Event ID: {:?}\n\ - Room ID: {:?}\n\ - Sent By: {:?}\n\n\ - Report Score: {:?}\n\ - Report Reason: {:?}", - sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason - ), - format!( - "
Report received from: {0:?}\ -
  • Event Info
    • Event ID: {1:?}\ - 🔗
    • Room ID: {2:?}\ -
    • Sent By: {3:?}
  • \ - Report Info
    • Report Score: {4:?}
    • Report Reason: {5}
  • \ -
", - sender_user, - pdu.event_id, - pdu.room_id, - pdu.sender, - body.score, - HtmlEscape(body.reason.as_deref().unwrap_or("")) - ), - )); + services().admin.send_message(message::RoomMessageEventContent::text_html( + format!( + "Report received from: {}\n\nEvent ID: {:?}\nRoom ID: {:?}\nSent \ + By: {:?}\n\nReport Score: {:?}\nReport Reason: {:?}", + sender_user, + pdu.event_id, + pdu.room_id, + pdu.sender, + body.score, + body.reason + ), + format!( + r#" +
+ + Report received from: + {0:?} + +
    +
  • + Event Info +
      +
    • + Event ID: + {1:?} + 🔗 +
    • +
    • + Room ID: + {2:?} +
    • +
    • + Sent By: + {3:?} +
    • +
    +
  • +
  • + Report Info +
      +
    • Report Score: {4:?}
    • +
    • Report Reason: {5}
    • +
    +
  • +
+
+ "#, + sender_user, + pdu.event_id, + pdu.room_id, + pdu.sender, + body.score, + html_escape::encode_safe(body.reason.as_deref().unwrap_or("")) + ), + )); - Ok(report_content::v3::Response {}) + Ok(Ra(report_content::v3::Response {})) } diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index e3e8a746..ebb5a26b 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -1,6 +1,5 @@ -use crate::{ - api::client_server::invite_helper, service::pdu::PduBuilder, services, Error, Result, Ruma, -}; +use std::{cmp::max, collections::BTreeMap}; + use ruma::{ api::client::{ error::ErrorKind, @@ -11,7 +10,9 @@ use ruma::{ canonical_alias::RoomCanonicalAliasEventContent, create::RoomCreateEventContent, guest_access::{GuestAccess, RoomGuestAccessEventContent}, - history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + history_visibility::{ + HistoryVisibility, RoomHistoryVisibilityEventContent, + }, join_rules::{JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, name::RoomNameEventContent, @@ -23,18 +24,22 @@ use ruma::{ }, int, serde::JsonObject, - CanonicalJsonObject, OwnedRoomAliasId, RoomAliasId, RoomId, RoomVersionId, + CanonicalJsonObject, OwnedRoomAliasId, RoomAliasId, RoomId, }; use serde_json::{json, value::to_raw_value}; -use std::{cmp::max, collections::BTreeMap, sync::Arc}; use tracing::{info, warn}; +use crate::{ + api::client_server::invite_helper, service::pdu::PduBuilder, services, + utils::room_version::RoomVersion, Ar, Error, Ra, Result, +}; + /// # `POST /_matrix/client/r0/createRoom` /// /// Creates a new room. 
/// /// - Room ID is randomly generated -/// - Create alias if room_alias_name is set +/// - Create alias if `room_alias_name` is set /// - Send create event /// - Join sender user /// - Send power levels event @@ -45,67 +50,56 @@ use tracing::{info, warn}; /// - Send events listed in initial state /// - Send events implied by `name` and `topic` /// - Send invite events -pub async fn create_room_route( - body: Ruma, -) -> Result { +#[allow(clippy::too_many_lines)] +pub(crate) async fn create_room_route( + body: Ar, +) -> Result> { use create_room::v3::RoomPreset; - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = + body.sender_user.as_deref().expect("user is authenticated"); let room_id = RoomId::new(services().globals.server_name()); services().rooms.short.get_or_create_shortroomid(&room_id)?; - let mutex_state = Arc::clone( - services() - .globals - .roomid_mutex_state - .write() - .await - .entry(room_id.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; + let room_token = + services().globals.roomid_mutex_state.lock_key(room_id.clone()).await; if !services().globals.allow_room_creation() && body.appservice_info.is_none() && !services().users.is_admin(sender_user)? { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Room creation has been disabled.", )); } let alias: Option = - body.room_alias_name - .as_ref() - .map_or(Ok(None), |localpart| { - // TODO: Check for invalid characters and maximum length - let alias = RoomAliasId::parse(format!( - "#{}:{}", - localpart, - services().globals.server_name() - )) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; - - if services() - .rooms - .alias - .resolve_local_alias(&alias)? 
- .is_some() - { - Err(Error::BadRequest( - ErrorKind::RoomInUse, - "Room alias already exists.", - )) - } else { - Ok(Some(alias)) - } + body.room_alias_name.as_ref().map_or(Ok(None), |localpart| { + // TODO: Check for invalid characters and maximum length + let alias = RoomAliasId::parse(format!( + "#{}:{}", + localpart, + services().globals.server_name() + )) + .map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias.") })?; - if let Some(ref alias) = alias { - if let Some(ref info) = body.appservice_info { + if services().rooms.alias.resolve_local_alias(&alias)?.is_some() { + Err(Error::BadRequest( + ErrorKind::RoomInUse, + "Room alias already exists.", + )) + } else { + Ok(Some(alias)) + } + })?; + + if let Some(alias) = &alias { + if let Some(info) = &body.appservice_info { if !info.aliases.is_match(alias.as_str()) { return Err(Error::BadRequest( ErrorKind::Exclusive, @@ -120,7 +114,7 @@ pub async fn create_room_route( } } - let room_version = match body.room_version.clone() { + let room_version_id = match body.room_version.clone() { Some(room_version) => { if services() .globals @@ -137,79 +131,66 @@ pub async fn create_room_route( } None => services().globals.default_room_version(), }; + let room_version = RoomVersion::try_from(&room_version_id)?; - let content = match &body.creation_content { - Some(content) => { - let mut content = content - .deserialize_as::() - .expect("Invalid creation content"); - - match room_version { - RoomVersionId::V1 - | RoomVersionId::V2 - | RoomVersionId::V3 - | RoomVersionId::V4 - | RoomVersionId::V5 - | RoomVersionId::V6 - | RoomVersionId::V7 - | RoomVersionId::V8 - | RoomVersionId::V9 - | RoomVersionId::V10 => { - content.insert( - "creator".into(), - json!(&sender_user).try_into().map_err(|_| { - Error::BadRequest(ErrorKind::BadJson, "Invalid creation content") - })?, - ); - } - RoomVersionId::V11 => {} // V11 removed the "creator" key - _ => unreachable!("Validity of room version already checked"), - } + 
let content = if let Some(content) = &body.creation_content { + let mut content = content + .deserialize_as::() + .expect("Invalid creation content"); + if room_version.create_event_creator_prop { content.insert( - "room_version".into(), - json!(room_version.as_str()).try_into().map_err(|_| { - Error::BadRequest(ErrorKind::BadJson, "Invalid creation content") + "creator".into(), + json!(&sender_user).try_into().map_err(|_| { + Error::BadRequest( + ErrorKind::BadJson, + "Invalid creation content", + ) })?, ); - content - } - None => { - let content = match room_version { - RoomVersionId::V1 - | RoomVersionId::V2 - | RoomVersionId::V3 - | RoomVersionId::V4 - | RoomVersionId::V5 - | RoomVersionId::V6 - | RoomVersionId::V7 - | RoomVersionId::V8 - | RoomVersionId::V9 - | RoomVersionId::V10 => RoomCreateEventContent::new_v1(sender_user.clone()), - RoomVersionId::V11 => RoomCreateEventContent::new_v11(), - _ => unreachable!("Validity of room version already checked"), - }; - let mut content = serde_json::from_str::( - to_raw_value(&content) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid creation content"))? - .get(), - ) - .unwrap(); - content.insert( - "room_version".into(), - json!(room_version.as_str()).try_into().map_err(|_| { - Error::BadRequest(ErrorKind::BadJson, "Invalid creation content") - })?, - ); - content } + content.insert( + "room_version".into(), + json!(room_version_id.as_str()).try_into().map_err(|_| { + Error::BadRequest( + ErrorKind::BadJson, + "Invalid creation content", + ) + })?, + ); + content + } else { + let content = if room_version.create_event_creator_prop { + RoomCreateEventContent::new_v1(sender_user.to_owned()) + } else { + RoomCreateEventContent::new_v11() + }; + let mut content = serde_json::from_str::( + to_raw_value(&content) + .map_err(|_| { + Error::BadRequest( + ErrorKind::BadJson, + "Invalid creation content", + ) + })? 
+ .get(), + ) + .unwrap(); + content.insert( + "room_version".into(), + json!(room_version_id.as_str()).try_into().map_err(|_| { + Error::BadRequest( + ErrorKind::BadJson, + "Invalid creation content", + ) + })?, + ); + content }; // Validate creation content let de_result = serde_json::from_str::( - to_raw_value(&content) - .expect("Invalid creation content") - .get(), + to_raw_value(&content).expect("Invalid creation content").get(), ); if de_result.is_err() { @@ -226,14 +207,14 @@ pub async fn create_room_route( .build_and_append_pdu( PduBuilder { event_type: TimelineEventType::RoomCreate, - content: to_raw_value(&content).expect("event is valid, we just created it"), + content: to_raw_value(&content) + .expect("event is valid, we just created it"), unsigned: None, - state_key: Some("".to_owned()), + state_key: Some(String::new()), redacts: None, }, sender_user, - &room_id, - &state_lock, + &room_token, ) .await?; @@ -260,8 +241,7 @@ pub async fn create_room_route( redacts: None, }, sender_user, - &room_id, - &state_lock, + &room_token, ) .await?; @@ -271,11 +251,11 @@ pub async fn create_room_route( let preset = body.preset.clone().unwrap_or(match &body.visibility { room::Visibility::Private => RoomPreset::PrivateChat, room::Visibility::Public => RoomPreset::PublicChat, - _ => RoomPreset::PrivateChat, // Room visibility should not be custom + _ => unimplemented!("unknown room visibility"), }); let mut users = BTreeMap::new(); - users.insert(sender_user.clone(), int!(100)); + users.insert(sender_user.to_owned(), int!(100)); if preset == RoomPreset::TrustedPrivateChat { for invite_ in &body.invite { @@ -283,17 +263,24 @@ pub async fn create_room_route( } } - let mut power_levels_content = serde_json::to_value(RoomPowerLevelsEventContent { - users, - ..Default::default() - }) - .expect("event is valid, we just created it"); + let mut power_levels_content = + serde_json::to_value(RoomPowerLevelsEventContent { + users, + ..Default::default() + }) + .expect("event is 
valid, we just created it"); - if let Some(power_level_content_override) = &body.power_level_content_override { - let json: JsonObject = serde_json::from_str(power_level_content_override.json().get()) - .map_err(|_| { - Error::BadRequest(ErrorKind::BadJson, "Invalid power_level_content_override.") - })?; + if let Some(power_level_content_override) = + &body.power_level_content_override + { + let json: JsonObject = + serde_json::from_str(power_level_content_override.json().get()) + .map_err(|_| { + Error::BadRequest( + ErrorKind::BadJson, + "Invalid power_level_content_override.", + ) + })?; for (key, value) in json { power_levels_content[key] = value; @@ -309,12 +296,11 @@ pub async fn create_room_route( content: to_raw_value(&power_levels_content) .expect("to_raw_value always works on serde_json::Value"), unsigned: None, - state_key: Some("".to_owned()), + state_key: Some(String::new()), redacts: None, }, sender_user, - &room_id, - &state_lock, + &room_token, ) .await?; @@ -332,12 +318,11 @@ pub async fn create_room_route( }) .expect("We checked that alias earlier, it must be fine"), unsigned: None, - state_key: Some("".to_owned()), + state_key: Some(String::new()), redacts: None, }, sender_user, - &room_id, - &state_lock, + &room_token, ) .await?; } @@ -351,19 +336,20 @@ pub async fn create_room_route( .build_and_append_pdu( PduBuilder { event_type: TimelineEventType::RoomJoinRules, - content: to_raw_value(&RoomJoinRulesEventContent::new(match preset { - RoomPreset::PublicChat => JoinRule::Public, - // according to spec "invite" is the default - _ => JoinRule::Invite, - })) + content: to_raw_value(&RoomJoinRulesEventContent::new( + match preset { + RoomPreset::PublicChat => JoinRule::Public, + // according to spec "invite" is the default + _ => JoinRule::Invite, + }, + )) .expect("event is valid, we just created it"), unsigned: None, - state_key: Some("".to_owned()), + state_key: Some(String::new()), redacts: None, }, sender_user, - &room_id, - &state_lock, + 
&room_token, ) .await?; @@ -379,12 +365,11 @@ pub async fn create_room_route( )) .expect("event is valid, we just created it"), unsigned: None, - state_key: Some("".to_owned()), + state_key: Some(String::new()), redacts: None, }, sender_user, - &room_id, - &state_lock, + &room_token, ) .await?; @@ -395,30 +380,35 @@ pub async fn create_room_route( .build_and_append_pdu( PduBuilder { event_type: TimelineEventType::RoomGuestAccess, - content: to_raw_value(&RoomGuestAccessEventContent::new(match preset { - RoomPreset::PublicChat => GuestAccess::Forbidden, - _ => GuestAccess::CanJoin, - })) + content: to_raw_value(&RoomGuestAccessEventContent::new( + match preset { + RoomPreset::PublicChat => GuestAccess::Forbidden, + _ => GuestAccess::CanJoin, + }, + )) .expect("event is valid, we just created it"), unsigned: None, - state_key: Some("".to_owned()), + state_key: Some(String::new()), redacts: None, }, sender_user, - &room_id, - &state_lock, + &room_token, ) .await?; // 6. Events listed in initial_state for event in &body.initial_state { - let mut pdu_builder = event.deserialize_as::().map_err(|e| { - warn!("Invalid initial state event: {:?}", e); - Error::BadRequest(ErrorKind::InvalidParam, "Invalid initial state event.") - })?; + let mut pdu_builder = + event.deserialize_as::().map_err(|error| { + warn!(%error, "Invalid initial state event"); + Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid initial state event.", + ) + })?; // Implicit state key defaults to "" - pdu_builder.state_key.get_or_insert_with(|| "".to_owned()); + pdu_builder.state_key.get_or_insert_with(String::new); // Silently skip encryption events if they are not allowed if pdu_builder.event_type == TimelineEventType::RoomEncryption @@ -430,7 +420,7 @@ pub async fn create_room_route( services() .rooms .timeline - .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock) + .build_and_append_pdu(pdu_builder, sender_user, &room_token) .await?; } @@ -442,15 +432,16 @@ pub async fn 
create_room_route( .build_and_append_pdu( PduBuilder { event_type: TimelineEventType::RoomName, - content: to_raw_value(&RoomNameEventContent::new(name.clone())) - .expect("event is valid, we just created it"), + content: to_raw_value(&RoomNameEventContent::new( + name.clone(), + )) + .expect("event is valid, we just created it"), unsigned: None, - state_key: Some("".to_owned()), + state_key: Some(String::new()), redacts: None, }, sender_user, - &room_id, - &state_lock, + &room_token, ) .await?; } @@ -467,54 +458,57 @@ pub async fn create_room_route( }) .expect("event is valid, we just created it"), unsigned: None, - state_key: Some("".to_owned()), + state_key: Some(String::new()), redacts: None, }, sender_user, - &room_id, - &state_lock, + &room_token, ) .await?; } // 8. Events implied by invite (and TODO: invite_3pid) - drop(state_lock); + drop(room_token); for user_id in &body.invite { - let _ = invite_helper(sender_user, user_id, &room_id, None, body.is_direct).await; + if let Err(error) = + invite_helper(sender_user, user_id, &room_id, None, body.is_direct) + .await + { + warn!(%error, "Invite helper failed"); + }; } // Homeserver specific stuff if let Some(alias) = alias { - services().rooms.alias.set_alias(&alias, &room_id)?; + services().rooms.alias.set_alias(&alias, &room_id, sender_user)?; } if body.visibility == room::Visibility::Public { services().rooms.directory.set_public(&room_id)?; } - info!("{} created a room", sender_user); + info!(user_id = %sender_user, room_id = %room_id, "User created a room"); - Ok(create_room::v3::Response::new(room_id)) + Ok(Ra(create_room::v3::Response::new(room_id))) } /// # `GET /_matrix/client/r0/rooms/{roomId}/event/{eventId}` /// /// Gets a single event. 
/// -/// - You have to currently be joined to the room (TODO: Respect history visibility) -pub async fn get_room_event_route( - body: Ruma, -) -> Result { +/// - You have to currently be joined to the room (TODO: Respect history +/// visibility) +pub(crate) async fn get_room_event_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event = services() - .rooms - .timeline - .get_pdu(&body.event_id)? - .ok_or_else(|| { - warn!("Event not found, event ID: {:?}", &body.event_id); + let event = services().rooms.timeline.get_pdu(&body.event_id)?.ok_or_else( + || { + warn!(event_id = %body.event_id, "Event not found"); Error::BadRequest(ErrorKind::NotFound, "Event not found.") - })?; + }, + )?; if !services().rooms.state_accessor.user_can_see_event( sender_user, @@ -522,7 +516,7 @@ pub async fn get_room_event_route( &body.event_id, )? { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "You don't have permission to view this event.", )); } @@ -530,40 +524,37 @@ pub async fn get_room_event_route( let mut event = (*event).clone(); event.add_age()?; - Ok(get_room_event::v3::Response { + Ok(Ra(get_room_event::v3::Response { event: event.to_room_event(), - }) + })) } /// # `GET /_matrix/client/r0/rooms/{roomId}/aliases` /// /// Lists all aliases of the room. /// -/// - Only users joined to the room are allowed to call this TODO: Allow any user to call it if history_visibility is world readable -pub async fn get_room_aliases_route( - body: Ruma, -) -> Result { +/// - Only users joined to the room are allowed to call this TODO: Allow any +/// user to call it if `history_visibility` is world readable +pub(crate) async fn get_room_aliases_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !services() - .rooms - .state_cache - .is_joined(sender_user, &body.room_id)? 
- { + if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "You don't have permission to view this room.", )); } - Ok(aliases::v3::Response { + Ok(Ra(aliases::v3::Response { aliases: services() .rooms .alias .local_aliases_for_room(&body.room_id) - .filter_map(|a| a.ok()) + .filter_map(Result::ok) .collect(), - }) + })) } /// # `POST /_matrix/client/r0/rooms/{roomId}/upgrade` @@ -576,42 +567,34 @@ pub async fn get_room_aliases_route( /// - Transfers some state events /// - Moves local aliases /// - Modifies old room power levels to prevent users from speaking -pub async fn upgrade_room_route( - body: Ruma, -) -> Result { +#[allow(clippy::too_many_lines)] +pub(crate) async fn upgrade_room_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !services() - .globals - .supported_room_versions() - .contains(&body.new_version) + if !services().globals.supported_room_versions().contains(&body.new_version) { return Err(Error::BadRequest( ErrorKind::UnsupportedRoomVersion, "This server does not support that room version.", )); } + let new_version = RoomVersion::try_from(&body.new_version)?; // Create a replacement room let replacement_room = RoomId::new(services().globals.server_name()); - services() - .rooms - .short - .get_or_create_shortroomid(&replacement_room)?; + services().rooms.short.get_or_create_shortroomid(&replacement_room)?; - let mutex_state = Arc::clone( - services() - .globals - .roomid_mutex_state - .write() - .await - .entry(body.room_id.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; + let original_room_token = services() + .globals + .roomid_mutex_state + .lock_key(body.room_id.clone()) + .await; - // Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further - // Fail if the sender does not have the required 
permissions + // Send a m.room.tombstone event to the old room to indicate that it is not + // intended to be used any further Fail if the sender does not have the + // required permissions let tombstone_event_id = services() .rooms .timeline @@ -624,27 +607,20 @@ pub async fn upgrade_room_route( }) .expect("event is valid, we just created it"), unsigned: None, - state_key: Some("".to_owned()), + state_key: Some(String::new()), redacts: None, }, sender_user, - &body.room_id, - &state_lock, + &original_room_token, ) .await?; // Change lock to replacement room - drop(state_lock); - let mutex_state = Arc::clone( - services() - .globals - .roomid_mutex_state - .write() - .await - .entry(replacement_room.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; + let replacement_room_token = services() + .globals + .roomid_mutex_state + .lock_key(replacement_room.clone()) + .await; // Get the old room creation event let mut create_event_content = serde_json::from_str::( @@ -652,7 +628,9 @@ pub async fn upgrade_room_route( .rooms .state_accessor .room_state_get(&body.room_id, &StateEventType::RoomCreate, "")? - .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? + .ok_or_else(|| { + Error::bad_database("Found room without m.room.create event.") + })? 
.content .get(), ) @@ -664,42 +642,38 @@ pub async fn upgrade_room_route( (*tombstone_event_id).to_owned(), )); - // Send a m.room.create event containing a predecessor field and the applicable room_version - match body.new_version { - RoomVersionId::V1 - | RoomVersionId::V2 - | RoomVersionId::V3 - | RoomVersionId::V4 - | RoomVersionId::V5 - | RoomVersionId::V6 - | RoomVersionId::V7 - | RoomVersionId::V8 - | RoomVersionId::V9 - | RoomVersionId::V10 => { - create_event_content.insert( - "creator".into(), - json!(&sender_user).try_into().map_err(|_| { - Error::BadRequest(ErrorKind::BadJson, "Error forming creation event") - })?, - ); - } - RoomVersionId::V11 => { - // "creator" key no longer exists in V11 rooms - create_event_content.remove("creator"); - } - _ => unreachable!("Validity of room version already checked"), + // Send a m.room.create event containing a predecessor field and the + // applicable room_version + if new_version.create_event_creator_prop { + create_event_content.insert( + "creator".into(), + json!(&sender_user).try_into().map_err(|_| { + Error::BadRequest( + ErrorKind::BadJson, + "Error forming creation event", + ) + })?, + ); + } else { + create_event_content.remove("creator"); } create_event_content.insert( "room_version".into(), - json!(&body.new_version) - .try_into() - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?, + json!(&body.new_version).try_into().map_err(|_| { + Error::BadRequest( + ErrorKind::BadJson, + "Error forming creation event", + ) + })?, ); create_event_content.insert( "predecessor".into(), - json!(predecessor) - .try_into() - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?, + json!(predecessor).try_into().map_err(|_| { + Error::BadRequest( + ErrorKind::BadJson, + "Error forming creation event", + ) + })?, ); // Validate creation event content @@ -725,12 +699,11 @@ pub async fn upgrade_room_route( content: to_raw_value(&create_event_content) 
.expect("event is valid, we just created it"), unsigned: None, - state_key: Some("".to_owned()), + state_key: Some(String::new()), redacts: None, }, sender_user, - &replacement_room, - &state_lock, + &replacement_room_token, ) .await?; @@ -757,8 +730,7 @@ pub async fn upgrade_room_route( redacts: None, }, sender_user, - &replacement_room, - &state_lock, + &replacement_room_token, ) .await?; @@ -777,15 +749,15 @@ pub async fn upgrade_room_route( // Replicate transferable state events to the new room for event_type in transferable_state_events { - let event_content = - match services() - .rooms - .state_accessor - .room_state_get(&body.room_id, &event_type, "")? - { - Some(v) => v.content.clone(), - None => continue, // Skipping missing events. - }; + let event_content = match services() + .rooms + .state_accessor + .room_state_get(&body.room_id, &event_type, "")? + { + Some(v) => v.content.clone(), + // Skipping missing events. + None => continue, + }; services() .rooms @@ -795,12 +767,11 @@ pub async fn upgrade_room_route( event_type: event_type.to_string().into(), content: event_content, unsigned: None, - state_key: Some("".to_owned()), + state_key: Some(String::new()), redacts: None, }, sender_user, - &replacement_room, - &state_lock, + &replacement_room_token, ) .await?; } @@ -810,32 +781,45 @@ pub async fn upgrade_room_route( .rooms .alias .local_aliases_for_room(&body.room_id) - .filter_map(|r| r.ok()) + .filter_map(Result::ok) { - services() - .rooms - .alias - .set_alias(&alias, &replacement_room)?; + services().rooms.alias.set_alias( + &alias, + &replacement_room, + sender_user, + )?; } // Get the old room power levels - let mut power_levels_event_content: RoomPowerLevelsEventContent = serde_json::from_str( - services() - .rooms - .state_accessor - .room_state_get(&body.room_id, &StateEventType::RoomPowerLevels, "")? - .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? 
- .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid room event in database."))?; + let mut power_levels_event_content: RoomPowerLevelsEventContent = + serde_json::from_str( + services() + .rooms + .state_accessor + .room_state_get( + &body.room_id, + &StateEventType::RoomPowerLevels, + "", + )? + .ok_or_else(|| { + Error::bad_database( + "Found room without m.room.create event.", + ) + })? + .content + .get(), + ) + .map_err(|_| Error::bad_database("Invalid room event in database."))?; - // Setting events_default and invite to the greater of 50 and users_default + 1 - let new_level = max(int!(50), power_levels_event_content.users_default + int!(1)); + // Setting events_default and invite to the greater of 50 and users_default + // + 1 + let new_level = + max(int!(50), power_levels_event_content.users_default + int!(1)); power_levels_event_content.events_default = new_level; power_levels_event_content.invite = new_level; - // Modify the power levels in the old room to prevent sending of events and inviting new users + // Modify the power levels in the old room to prevent sending of events and + // inviting new users let _ = services() .rooms .timeline @@ -845,17 +829,16 @@ pub async fn upgrade_room_route( content: to_raw_value(&power_levels_event_content) .expect("event is valid, we just created it"), unsigned: None, - state_key: Some("".to_owned()), + state_key: Some(String::new()), redacts: None, }, sender_user, - &body.room_id, - &state_lock, + &original_room_token, ) .await?; - drop(state_lock); - // Return the replacement room id - Ok(upgrade_room::v3::Response { replacement_room }) + Ok(Ra(upgrade_room::v3::Response { + replacement_room, + })) } diff --git a/src/api/client_server/search.rs b/src/api/client_server/search.rs index e9fac365..cd36a13b 100644 --- a/src/api/client_server/search.rs +++ b/src/api/client_server/search.rs @@ -1,22 +1,31 @@ -use crate::{services, Error, Result, Ruma}; -use ruma::api::client::{ - error::ErrorKind, - 
search::search_events::{ - self, - v3::{EventContextResult, ResultCategories, ResultRoomEvents, SearchResult}, +use std::collections::BTreeMap; + +use ruma::{ + api::client::{ + error::ErrorKind, + search::search_events::{ + self, + v3::{ + EventContextResult, ResultCategories, ResultRoomEvents, + SearchResult, + }, + }, }, + uint, UInt, }; -use std::collections::BTreeMap; +use crate::{services, Ar, Error, Ra, Result}; /// # `POST /_matrix/client/r0/search` /// /// Searches rooms for messages. /// -/// - Only works if the user is currently joined to the room (TODO: Respect history visibility) -pub async fn search_events_route( - body: Ruma, -) -> Result { +/// - Only works if the user is currently joined to the room (TODO: Respect +/// history visibility) +#[allow(clippy::too_many_lines)] +pub(crate) async fn search_events_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let search_criteria = body.search_categories.room_events.as_ref().unwrap(); @@ -27,23 +36,24 @@ pub async fn search_events_route( .rooms .state_cache .rooms_joined(sender_user) - .filter_map(|r| r.ok()) + .filter_map(Result::ok) .collect() }); // Use limit or else 10, with maximum 100 - let limit = filter.limit.map_or(10, u64::from).min(100) as usize; + let limit = filter + .limit + .map(|x| x.min(uint!(100))) + .unwrap_or(uint!(10)) + .try_into() + .expect("0-100 should fit in usize"); let mut searches = Vec::new(); for room_id in room_ids { - if !services() - .rooms - .state_cache - .is_joined(sender_user, &room_id)? - { + if !services().rooms.state_cache.is_joined(sender_user, &room_id)? 
{ return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "You don't have permission to view this room.", )); } @@ -65,7 +75,8 @@ pub async fn search_events_route( "Invalid next_batch token.", )) } - None => 0, // Default to the start + // Default to the start + None => 0, }; let mut results = Vec::new(); @@ -73,7 +84,9 @@ pub async fn search_events_route( if let Some(s) = searches .iter_mut() .map(|s| (s.peek().cloned(), s)) - .max_by_key(|(peek, _)| peek.clone()) + .max_by_key(|(peek, _)| { + peek.as_ref().map(|id| id.as_bytes().to_vec()) + }) .and_then(|(_, i)| i.next()) { results.push(s); @@ -82,6 +95,7 @@ pub async fn search_events_route( let results: Vec<_> = results .iter() + .skip(skip) .filter_map(|result| { services() .rooms @@ -89,11 +103,16 @@ pub async fn search_events_route( .get_pdu_from_id(result) .ok()? .filter(|pdu| { - services() - .rooms - .state_accessor - .user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id) - .unwrap_or(false) + !pdu.is_redacted() + && services() + .rooms + .state_accessor + .user_can_see_event( + sender_user, + &pdu.room_id, + &pdu.event_id, + ) + .unwrap_or(false) }) .map(|pdu| pdu.to_room_event()) }) @@ -110,29 +129,33 @@ pub async fn search_events_route( result: Some(result), }) }) - .filter_map(|r| r.ok()) - .skip(skip) + .filter_map(Result::ok) .take(limit) .collect(); - let next_batch = if results.len() < limit { - None - } else { - Some((skip + limit).to_string()) - }; + let more_unloaded_results = searches.iter_mut().any(|s| s.peek().is_some()); + let next_batch = more_unloaded_results.then(|| (skip + limit).to_string()); - Ok(search_events::v3::Response::new(ResultCategories { + Ok(Ra(search_events::v3::Response::new(ResultCategories { room_events: ResultRoomEvents { - count: Some((results.len() as u32).into()), // TODO: set this to none. 
Element shouldn't depend on it - groups: BTreeMap::new(), // TODO + // TODO(compat): this is not a good estimate of the total number of + // results. we should just be returning None, but + // element incorrectly relies on this field. Switch back + // to None when [1] is fixed + // + // [1]: https://github.com/element-hq/element-web/issues/27517 + count: Some(results.len().try_into().unwrap_or(UInt::MAX)), + // TODO + groups: BTreeMap::new(), next_batch, results, - state: BTreeMap::new(), // TODO + // TODO + state: BTreeMap::new(), highlights: search_criteria .search_term .split_terminator(|c: char| !c.is_alphanumeric()) .map(str::to_lowercase) .collect(), }, - })) + }))) } diff --git a/src/api/client_server/session.rs b/src/api/client_server/session.rs index 3e583fac..228ab4d3 100644 --- a/src/api/client_server/session.rs +++ b/src/api/client_server/session.rs @@ -1,9 +1,13 @@ -use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; -use crate::{services, utils, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, - session::{get_login_types, login, logout, logout_all}, + session::{ + get_login_types::{ + self, + v3::{ApplicationServiceLoginType, PasswordLoginType}, + }, + login, logout, logout_all, + }, uiaa::UserIdentifier, }, UserId, @@ -11,39 +15,50 @@ use ruma::{ use serde::Deserialize; use tracing::{info, warn}; +use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; +use crate::{services, utils, Ar, Error, Ra, Result}; + #[derive(Debug, Deserialize)] struct Claims { sub: String, - //exp: usize, } /// # `GET /_matrix/client/r0/login` /// -/// Get the supported login types of this server. One of these should be used as the `type` field -/// when logging in. -pub async fn get_login_types_route( - _body: Ruma, -) -> Result { - Ok(get_login_types::v3::Response::new(vec![ - get_login_types::v3::LoginType::Password(Default::default()), - get_login_types::v3::LoginType::ApplicationService(Default::default()), - ])) +/// Get the supported login types of this server. 
One of these should be used as +/// the `type` field when logging in. +pub(crate) async fn get_login_types_route( + _body: Ar, +) -> Result> { + Ok(Ra(get_login_types::v3::Response::new(vec![ + get_login_types::v3::LoginType::Password(PasswordLoginType::default()), + get_login_types::v3::LoginType::ApplicationService( + ApplicationServiceLoginType::default(), + ), + ]))) } /// # `POST /_matrix/client/r0/login` /// -/// Authenticates the user and returns an access token it can use in subsequent requests. +/// Authenticates the user and returns an access token it can use in subsequent +/// requests. /// -/// - The user needs to authenticate using their password (or if enabled using a json web token) +/// - The user needs to authenticate using their password (or if enabled using a +/// json web token) /// - If `device_id` is known: invalidates old access token of that device /// - If `device_id` is unknown: creates a new device /// - Returns access token that is associated with the user and device /// -/// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see -/// supported login types. -pub async fn login_route(body: Ruma) -> Result { +/// Note: You can use [`GET /_matrix/client/r0/login`](get_login_types_route) to +/// see supported login types. 
+#[allow( // To allow deprecated login methods - #![allow(deprecated)] + deprecated, + clippy::too_many_lines, +)] +pub(crate) async fn login_route( + body: Ar, +) -> Result> { // Validate login method // TODO: Other login methods let user_id = match &body.login_info { @@ -51,21 +66,31 @@ pub async fn login_route(body: Ruma) -> Result { - let user_id = if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { - UserId::parse_with_server_name( - user_id.to_lowercase(), - services().globals.server_name(), - ) - } else if let Some(user) = user { - UserId::parse(user) - } else { - warn!("Bad login type: {:?}", &body.login_info); - return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); - } - .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; + let user_id = + if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = + identifier + { + UserId::parse_with_server_name( + user_id.to_lowercase(), + services().globals.server_name(), + ) + } else if let Some(user) = user { + UserId::parse(user) + } else { + warn!(kind = ?body.login_info, "Bad login kind"); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Bad login type.", + )); + } + .map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidUsername, + "Username is invalid.", + ) + })?; if services().appservice.is_exclusive_user_id(&user_id).await { return Err(Error::BadRequest( @@ -74,13 +99,12 @@ pub async fn login_route(body: Ruma) -> Result) -> Result { - if let Some(jwt_decoding_key) = services().globals.jwt_decoding_key() { + login::v3::LoginInfo::Token(login::v3::Token { + token, + }) => { + if let Some(jwt_decoding_key) = + services().globals.jwt_decoding_key() + { let token = jsonwebtoken::decode::( token, jwt_decoding_key, &jsonwebtoken::Validation::default(), ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Token is invalid."))?; + .map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidUsername, + "Token is invalid.", + ) + 
})?; let username = token.claims.sub.to_lowercase(); - let user_id = - UserId::parse_with_server_name(username, services().globals.server_name()) - .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") - })?; + let user_id = UserId::parse_with_server_name( + username, + services().globals.server_name(), + ) + .map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidUsername, + "Username is invalid.", + ) + })?; if services().appservice.is_exclusive_user_id(&user_id).await { return Err(Error::BadRequest( @@ -126,28 +162,42 @@ pub async fn login_route(body: Ruma) -> Result { - let user_id = if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { - UserId::parse_with_server_name( - user_id.to_lowercase(), - services().globals.server_name(), - ) - } else if let Some(user) = user { - UserId::parse(user) - } else { - warn!("Bad login type: {:?}", &body.login_info); - return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); - } - .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; + login::v3::LoginInfo::ApplicationService( + login::v3::ApplicationService { + identifier, + user, + }, + ) => { + let user_id = + if let Some(UserIdentifier::UserIdOrLocalpart(user_id)) = + identifier + { + UserId::parse_with_server_name( + user_id.to_lowercase(), + services().globals.server_name(), + ) + } else if let Some(user) = user { + UserId::parse(user) + } else { + warn!(kind = ?body.login_info, "Bad login kind"); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Bad login type.", + )); + } + .map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidUsername, + "Username is invalid.", + ) + })?; - if let Some(ref info) = body.appservice_info { + if let Some(info) = &body.appservice_info { if !info.is_user_match(&user_id) { return Err(Error::BadRequest( ErrorKind::Exclusive, @@ -164,7 +214,7 @@ pub async fn login_route(body: Ruma) -> Result { - warn!("Unsupported or unknown login 
type: {:?}", &body.login_info); + warn!(kind = ?body.login_info, "Unsupported or unknown login kind"); return Err(Error::BadRequest( ErrorKind::Unknown, "Unsupported login type.", @@ -182,11 +232,11 @@ pub async fn login_route(body: Ruma) -> Result) -> Result) -> Result) -> Result) -> Result { +pub(crate) async fn logout_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + let sender_device = + body.sender_device.as_ref().expect("user is authenticated"); - if let Some(ref info) = body.appservice_info { + if let Some(info) = &body.appservice_info { if !info.is_user_match(sender_user) { return Err(Error::BadRequest( ErrorKind::Exclusive, @@ -238,7 +292,9 @@ pub async fn logout_route(body: Ruma) -> Result) -> Result, -) -> Result { +/// Note: This is equivalent to calling [`GET +/// /_matrix/client/r0/logout`](logout_route) from each device of this user. 
+pub(crate) async fn logout_all_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if let Some(ref info) = body.appservice_info { + if let Some(info) = &body.appservice_info { if !info.is_user_match(sender_user) { return Err(Error::BadRequest( ErrorKind::Exclusive, @@ -275,5 +332,5 @@ pub async fn logout_all_route( services().users.remove_device(sender_user, &device_id)?; } - Ok(logout_all::v3::Response::new()) + Ok(Ra(logout_all::v3::Response::new())) } diff --git a/src/api/client_server/space.rs b/src/api/client_server/space.rs index e2ea8c34..930814e8 100644 --- a/src/api/client_server/space.rs +++ b/src/api/client_server/space.rs @@ -1,23 +1,32 @@ -use crate::{services, Result, Ruma}; -use ruma::api::client::space::get_hierarchy; +use ruma::{api::client::space::get_hierarchy, uint}; -/// # `GET /_matrix/client/v1/rooms/{room_id}/hierarchy`` +use crate::{services, Ar, Ra, Result}; + +/// # `GET /_matrix/client/v1/rooms/{room_id}/hierarchy` /// -/// Paginates over the space tree in a depth-first manner to locate child rooms of a given space. -pub async fn get_hierarchy_route( - body: Ruma, -) -> Result { +/// Paginates over the space tree in a depth-first manner to locate child rooms +/// of a given space. 
+pub(crate) async fn get_hierarchy_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let skip = body - .from - .as_ref() - .and_then(|s| s.parse::().ok()) - .unwrap_or(0); + let skip = + body.from.as_ref().and_then(|s| s.parse::().ok()).unwrap_or(0); - let limit = body.limit.map_or(10, u64::from).min(100) as usize; + let limit = body + .limit + .map(|x| x.min(uint!(100))) + .unwrap_or(uint!(10)) + .try_into() + .expect("0-100 should fit in usize"); - let max_depth = body.max_depth.map_or(3, u64::from).min(10) as usize + 1; // +1 to skip the space room itself + // Plus one to skip the space room itself + let max_depth = usize::try_from( + body.max_depth.map(|x| x.min(uint!(10))).unwrap_or(uint!(3)), + ) + .expect("0-10 should fit in usize") + + 1; services() .rooms @@ -31,4 +40,5 @@ pub async fn get_hierarchy_route( body.suggested_only, ) .await + .map(Ra) } diff --git a/src/api/client_server/state.rs b/src/api/client_server/state.rs index e62aa013..2e6f3b13 100644 --- a/src/api/client_server/state.rs +++ b/src/api/client_server/state.rs @@ -1,42 +1,49 @@ -use std::sync::Arc; +use std::{collections::HashSet, sync::Arc}; -use crate::{service::pdu::PduBuilder, services, Error, Result, Ruma, RumaResponse}; use ruma::{ api::client::{ error::ErrorKind, state::{get_state_events, get_state_events_for_key, send_state_event}, }, events::{ - room::canonical_alias::RoomCanonicalAliasEventContent, AnyStateEventContent, StateEventType, + room::canonical_alias::RoomCanonicalAliasEventContent, + AnyStateEventContent, StateEventType, }, serde::Raw, - EventId, RoomId, UserId, + EventId, RoomAliasId, RoomId, UserId, }; -use tracing::log::warn; +use serde::Deserialize; +use tracing::warn; + +use crate::{service::pdu::PduBuilder, services, Ar, Error, Ra, Result}; /// # `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}/{stateKey}` /// /// Sends a state event into the room. 
/// /// - The only requirement for the content is that it has to be valid json -/// - Tries to send the event into the room, auth rules will determine if it is allowed -/// - If event is new canonical_alias: Rejects if alias is incorrect -pub async fn send_state_event_for_key_route( - body: Ruma, -) -> Result { +/// - Tries to send the event into the room, auth rules will determine if it is +/// allowed +/// - If event is new `canonical_alias`: Rejects if alias is incorrect +pub(crate) async fn send_state_event_for_key_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event_id = send_state_event_for_key_helper( sender_user, &body.room_id, &body.event_type, - &body.body.body, // Yes, I hate it too - body.state_key.to_owned(), + // Yes, I hate it too + &body.body.body, + body.state_key.clone(), ) .await?; let event_id = (*event_id).to_owned(); - Ok(send_state_event::v3::Response { event_id }) + Ok(Ra(send_state_event::v3::Response { + event_id, + })) } /// # `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}` @@ -44,17 +51,20 @@ pub async fn send_state_event_for_key_route( /// Sends a state event into the room. 
/// /// - The only requirement for the content is that it has to be valid json -/// - Tries to send the event into the room, auth rules will determine if it is allowed -/// - If event is new canonical_alias: Rejects if alias is incorrect -pub async fn send_state_event_for_empty_key_route( - body: Ruma, -) -> Result> { +/// - Tries to send the event into the room, auth rules will determine if it is +/// allowed +/// - If event is new `canonical_alias`: Rejects if alias is incorrect +pub(crate) async fn send_state_event_for_empty_key_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // Forbid m.room.encryption if encryption is disabled - if body.event_type == StateEventType::RoomEncryption && !services().globals.allow_encryption() { + if body.event_type == StateEventType::RoomEncryption + && !services().globals.allow_encryption() + { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Encryption has been disabled", )); } @@ -64,22 +74,26 @@ pub async fn send_state_event_for_empty_key_route( &body.room_id, &body.event_type.to_string().into(), &body.body.body, - body.state_key.to_owned(), + body.state_key.clone(), ) .await?; let event_id = (*event_id).to_owned(); - Ok(send_state_event::v3::Response { event_id }.into()) + Ok(send_state_event::v3::Response { + event_id, + } + .into()) } /// # `GET /_matrix/client/r0/rooms/{roomid}/state` /// /// Get all state events for a room. 
/// -/// - If not joined: Only works if current room history visibility is world readable -pub async fn get_state_events_route( - body: Ruma, -) -> Result { +/// - If not joined: Only works if current room history visibility is world +/// readable +pub(crate) async fn get_state_events_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if !services() @@ -88,12 +102,12 @@ pub async fn get_state_events_route( .user_can_see_state_events(sender_user, &body.room_id)? { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "You don't have permission to view the room state.", )); } - Ok(get_state_events::v3::Response { + Ok(Ra(get_state_events::v3::Response { room_state: services() .rooms .state_accessor @@ -102,17 +116,18 @@ pub async fn get_state_events_route( .values() .map(|pdu| pdu.to_state_event()) .collect(), - }) + })) } /// # `GET /_matrix/client/r0/rooms/{roomid}/state/{eventType}/{stateKey}` /// /// Get single state event of a room. /// -/// - If not joined: Only works if current room history visibility is world readable -pub async fn get_state_events_for_key_route( - body: Ruma, -) -> Result { +/// - If not joined: Only works if current room history visibility is world +/// readable +pub(crate) async fn get_state_events_for_key_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if !services() @@ -121,7 +136,7 @@ pub async fn get_state_events_for_key_route( .user_can_see_state_events(sender_user, &body.room_id)? 
{ return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "You don't have permission to view the room state.", )); } @@ -138,20 +153,22 @@ pub async fn get_state_events_for_key_route( Error::BadRequest(ErrorKind::NotFound, "State event not found.") })?; - Ok(get_state_events_for_key::v3::Response { - content: serde_json::from_str(event.content.get()) - .map_err(|_| Error::bad_database("Invalid event content in database"))?, - }) + Ok(Ra(get_state_events_for_key::v3::Response { + content: serde_json::from_str(event.content.get()).map_err(|_| { + Error::bad_database("Invalid event content in database") + })?, + })) } /// # `GET /_matrix/client/r0/rooms/{roomid}/state/{eventType}` /// /// Get single state event of a room. /// -/// - If not joined: Only works if current room history visibility is world readable -pub async fn get_state_events_for_empty_key_route( - body: Ruma, -) -> Result> { +/// - If not joined: Only works if current room history visibility is world +/// readable +pub(crate) async fn get_state_events_for_empty_key_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if !services() @@ -160,7 +177,7 @@ pub async fn get_state_events_for_empty_key_route( .user_can_see_state_events(sender_user, &body.room_id)? 
{ return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "You don't have permission to view the room state.", )); } @@ -178,8 +195,9 @@ pub async fn get_state_events_for_empty_key_route( })?; Ok(get_state_events_for_key::v3::Response { - content: serde_json::from_str(event.content.get()) - .map_err(|_| Error::bad_database("Invalid event content in database"))?, + content: serde_json::from_str(event.content.get()).map_err(|_| { + Error::bad_database("Invalid event content in database") + })?, } .into()) } @@ -193,45 +211,15 @@ async fn send_state_event_for_key_helper( ) -> Result> { let sender_user = sender; - // TODO: Review this check, error if event is unparsable, use event type, allow alias if it - // previously existed - if let Ok(canonical_alias) = - serde_json::from_str::(json.json().get()) - { - let mut aliases = canonical_alias.alt_aliases.clone(); - - if let Some(alias) = canonical_alias.alias { - aliases.push(alias); - } - - for alias in aliases { - if alias.server_name() != services().globals.server_name() - || services() - .rooms - .alias - .resolve_local_alias(&alias)? 
- .filter(|room| room == room_id) // Make sure it's the right room - .is_none() - { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "You are only allowed to send canonical_alias \ - events when it's aliases already exists", - )); - } - } + if event_type == &StateEventType::RoomCanonicalAlias { + validate_canonical_alias_event(room_id, json.cast_ref()).await?; } - let mutex_state = Arc::clone( - services() - .globals - .roomid_mutex_state - .write() - .await - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; + let room_token = services() + .globals + .roomid_mutex_state + .lock_key(room_id.to_owned()) + .await; let event_id = services() .rooms @@ -239,16 +227,109 @@ async fn send_state_event_for_key_helper( .build_and_append_pdu( PduBuilder { event_type: event_type.to_string().into(), - content: serde_json::from_str(json.json().get()).expect("content is valid json"), + content: serde_json::from_str(json.json().get()) + .expect("content is valid json"), unsigned: None, state_key: Some(state_key), redacts: None, }, sender_user, - room_id, - &state_lock, + &room_token, ) .await?; Ok(event_id) } + +/// Checks that a new `m.room.canonical_alias` event is valid, by the spec's +/// requirements. +/// +/// From the [spec]: +/// +/// > If the event type being sent is m.room.canonical_alias servers SHOULD +/// > ensure that any new aliases being listed in the event are valid per their +/// > grammar/syntax and that they point to the room ID where the state event +/// > is to be sent. Servers do not validate aliases which are being removed or +/// > are already present in the state event. 
+/// +/// [spec]: https://spec.matrix.org/v1.13/client-server-api/#put_matrixclientv3roomsroomidstateeventtypestatekey +async fn validate_canonical_alias_event( + room_id: &RoomId, + json: &Raw, +) -> Result<()> { + // Use a custom struct instead of RoomCanonicalAliasEventContent because we + // only want to validate the syntax of new aliases, so can't deserialize + // everything to OwnedRoomAliasId. + #[derive(Deserialize)] + struct Extract { + alias: Option, + #[serde(default)] + alt_aliases: Vec, + } + + // If the existing canonical alias event is invalid, treat it as if there + // are no existing aliases instead of erroring out. This allows users to + // fix a bad canonical alias event by sending a new one, but means that + // every alias in the new event will be revalidated. + let old_event = services() + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomCanonicalAlias, "")? + .and_then(|old_event| { + serde_json::from_str::(old_event.content.get()) + .inspect_err(|error| { + warn!( + %error, + event_id=%old_event.event_id, + "Invalid canonical alias event in database" + ); + }) + .ok() + }); + + let old_aliases = if let Some(old_event) = &old_event { + old_event.alias.iter().chain(old_event.alt_aliases.iter()).collect() + } else { + HashSet::new() + }; + + if let Ok(canonical_alias) = + serde_json::from_str::(json.json().get()) + { + let aliases = canonical_alias + .alias + .iter() + .chain(canonical_alias.alt_aliases.iter()); + let new_aliases = aliases.filter(|alias| !old_aliases.contains(alias)); + + for alias in new_aliases { + let alias = RoomAliasId::parse(alias).map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "One or more aliases in m.room.canonical_alias event have \ + invalid syntax", + ) + })?; + + // The spec doesn't say explicitly that we should allow adding new + // remote canonical aliases, but it's reasonable behavior and what + // synapse does. 
+ if services().rooms.alias.resolve_alias(&alias).await?.as_deref() + != Some(room_id) + { + return Err(Error::BadRequest( + ErrorKind::BadAlias, + "You are only allowed to send canonical_alias events when \ + it's aliases already exists", + )); + } + } + } else { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "m.room.canonical_alias event did not match expected schema", + )); + } + + Ok(()) +} diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index e0c6e0b9..4ad37425 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -1,1159 +1,11 @@ +use ruma::{events::StateEventType, RoomId, UserId}; +use tracing::error; + use crate::{ - service::{pdu::EventHash, rooms::timeline::PduCount}, - services, utils, Error, PduEvent, Result, Ruma, RumaResponse, + service::rooms::timeline::PduCount, services, Error, PduEvent, Result, }; -use ruma::{ - api::client::{ - filter::{FilterDefinition, LazyLoadOptions}, - sync::sync_events::{ - self, - v3::{ - Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom, - LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State, Timeline, ToDevice, - }, - v4::SlidingOp, - DeviceLists, UnreadNotificationsCount, - }, - uiaa::UiaaResponse, - }, - events::{ - room::member::{MembershipState, RoomMemberEventContent}, - StateEventType, TimelineEventType, - }, - serde::Raw, - uint, DeviceId, EventId, JsOption, OwnedDeviceId, OwnedUserId, RoomId, UInt, UserId, -}; -use std::{ - collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet}, - sync::Arc, - time::Duration, -}; -use tokio::sync::watch::Sender; -use tracing::{error, info}; - -/// # `GET /_matrix/client/r0/sync` -/// -/// Synchronize the client's state with the latest state on the server. -/// -/// - This endpoint takes a `since` parameter which should be the `next_batch` value from a -/// previous request for incremental syncs. 
-/// -/// Calling this endpoint without a `since` parameter returns: -/// - Some of the most recent events of each timeline -/// - Notification counts for each room -/// - Joined and invited member counts, heroes -/// - All state events -/// -/// Calling this endpoint with a `since` parameter from a previous `next_batch` returns: -/// For joined rooms: -/// - Some of the most recent events of each timeline that happened after since -/// - If user joined the room after since: All state events (unless lazy loading is activated) and -/// all device list updates in that room -/// - If the user was already in the room: A list of all events that are in the state now, but were -/// not in the state at `since` -/// - If the state we send contains a member event: Joined and invited member counts, heroes -/// - Device list updates that happened after `since` -/// - If there are events in the timeline we send or the user send updated his read mark: Notification counts -/// - EDUs that are active now (read receipts, typing updates, presence) -/// - TODO: Allow multiple sync streams to support Pantalaimon -/// -/// For invited rooms: -/// - If the user was invited after `since`: A subset of the state of the room at the point of the invite -/// -/// For left rooms: -/// - If the user left after `since`: prev_batch token, empty state (TODO: subset of the state at the point of the leave) -/// -/// - Sync is handled in an async task, multiple requests from the same device with the same -/// `since` will be cached -pub async fn sync_events_route( - body: Ruma, -) -> Result> { - let sender_user = body.sender_user.expect("user is authenticated"); - let sender_device = body.sender_device.expect("user is authenticated"); - let body = body.body; - - let mut rx = match services() - .globals - .sync_receivers - .write() - .await - .entry((sender_user.clone(), sender_device.clone())) - { - Entry::Vacant(v) => { - let (tx, rx) = tokio::sync::watch::channel(None); - - 
v.insert((body.since.to_owned(), rx.clone())); - - tokio::spawn(sync_helper_wrapper( - sender_user.clone(), - sender_device.clone(), - body, - tx, - )); - - rx - } - Entry::Occupied(mut o) => { - if o.get().0 != body.since { - let (tx, rx) = tokio::sync::watch::channel(None); - - o.insert((body.since.clone(), rx.clone())); - - info!("Sync started for {sender_user}"); - - tokio::spawn(sync_helper_wrapper( - sender_user.clone(), - sender_device.clone(), - body, - tx, - )); - - rx - } else { - o.get().1.clone() - } - } - }; - - let we_have_to_wait = rx.borrow().is_none(); - if we_have_to_wait { - if let Err(e) = rx.changed().await { - error!("Error waiting for sync: {}", e); - } - } - - let result = match rx - .borrow() - .as_ref() - .expect("When sync channel changes it's always set to some") - { - Ok(response) => Ok(response.clone()), - Err(error) => Err(error.to_response()), - }; - - result -} - -async fn sync_helper_wrapper( - sender_user: OwnedUserId, - sender_device: OwnedDeviceId, - body: sync_events::v3::Request, - tx: Sender>>, -) { - let since = body.since.clone(); - - let r = sync_helper(sender_user.clone(), sender_device.clone(), body).await; - - if let Ok((_, caching_allowed)) = r { - if !caching_allowed { - match services() - .globals - .sync_receivers - .write() - .await - .entry((sender_user, sender_device)) - { - Entry::Occupied(o) => { - // Only remove if the device didn't start a different /sync already - if o.get().0 == since { - o.remove(); - } - } - Entry::Vacant(_) => {} - } - } - } - - let _ = tx.send(Some(r.map(|(r, _)| r))); -} - -async fn sync_helper( - sender_user: OwnedUserId, - sender_device: OwnedDeviceId, - body: sync_events::v3::Request, - // bool = caching allowed -) -> Result<(sync_events::v3::Response, bool), Error> { - // TODO: match body.set_presence { - services().rooms.edus.presence.ping_presence(&sender_user)?; - - // Setup watchers, so if there's no response, we can wait for them - let watcher = 
services().globals.watch(&sender_user, &sender_device); - - let next_batch = services().globals.current_count()?; - let next_batchcount = PduCount::Normal(next_batch); - let next_batch_string = next_batch.to_string(); - - // Load filter - let filter = match body.filter { - None => FilterDefinition::default(), - Some(Filter::FilterDefinition(filter)) => filter, - Some(Filter::FilterId(filter_id)) => services() - .users - .get_filter(&sender_user, &filter_id)? - .unwrap_or_default(), - }; - - let (lazy_load_enabled, lazy_load_send_redundant) = match filter.room.state.lazy_load_options { - LazyLoadOptions::Enabled { - include_redundant_members: redundant, - } => (true, redundant), - _ => (false, false), - }; - - let full_state = body.full_state; - - let mut joined_rooms = BTreeMap::new(); - let since = body - .since - .as_ref() - .and_then(|string| string.parse().ok()) - .unwrap_or(0); - let sincecount = PduCount::Normal(since); - - let mut presence_updates = HashMap::new(); - let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in - let mut device_list_updates = HashSet::new(); - let mut device_list_left = HashSet::new(); - - // Look for device list updates of this account - device_list_updates.extend( - services() - .users - .keys_changed(sender_user.as_ref(), since, None) - .filter_map(|r| r.ok()), - ); - - let all_joined_rooms = services() - .rooms - .state_cache - .rooms_joined(&sender_user) - .collect::>(); - for room_id in all_joined_rooms { - let room_id = room_id?; - if let Ok(joined_room) = load_joined_room( - &sender_user, - &sender_device, - &room_id, - since, - sincecount, - next_batch, - next_batchcount, - lazy_load_enabled, - lazy_load_send_redundant, - full_state, - &mut device_list_updates, - &mut left_encrypted_users, - ) - .await - { - if !joined_room.is_empty() { - joined_rooms.insert(room_id.clone(), joined_room); - } - - // Take presence updates from this room - for (user_id, presence) in 
services() - .rooms - .edus - .presence - .presence_since(&room_id, since)? - { - match presence_updates.entry(user_id) { - Entry::Vacant(v) => { - v.insert(presence); - } - Entry::Occupied(mut o) => { - let p = o.get_mut(); - - // Update existing presence event with more info - p.content.presence = presence.content.presence; - if let Some(status_msg) = presence.content.status_msg { - p.content.status_msg = Some(status_msg); - } - if let Some(last_active_ago) = presence.content.last_active_ago { - p.content.last_active_ago = Some(last_active_ago); - } - if let Some(displayname) = presence.content.displayname { - p.content.displayname = Some(displayname); - } - if let Some(avatar_url) = presence.content.avatar_url { - p.content.avatar_url = Some(avatar_url); - } - if let Some(currently_active) = presence.content.currently_active { - p.content.currently_active = Some(currently_active); - } - } - } - } - } - } - - let mut left_rooms = BTreeMap::new(); - let all_left_rooms: Vec<_> = services() - .rooms - .state_cache - .rooms_left(&sender_user) - .collect(); - for result in all_left_rooms { - let (room_id, _) = result?; - - { - // Get and drop the lock to wait for remaining operations to finish - let mutex_insert = Arc::clone( - services() - .globals - .roomid_mutex_insert - .write() - .await - .entry(room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().await; - drop(insert_lock); - } - - let left_count = services() - .rooms - .state_cache - .get_left_count(&room_id, &sender_user)?; - - // Left before last sync - if Some(since) >= left_count { - continue; - } - - if !services().rooms.metadata.exists(&room_id)? 
{ - // This is just a rejected invite, not a room we know - let event = PduEvent { - event_id: EventId::new(services().globals.server_name()).into(), - sender: sender_user.clone(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - kind: TimelineEventType::RoomMember, - content: serde_json::from_str(r#"{ "membership": "leave"}"#).unwrap(), - state_key: Some(sender_user.to_string()), - unsigned: None, - // The following keys are dropped on conversion - room_id: room_id.clone(), - prev_events: vec![], - depth: uint!(1), - auth_events: vec![], - redacts: None, - hashes: EventHash { - sha256: String::new(), - }, - signatures: None, - }; - - left_rooms.insert( - room_id, - LeftRoom { - account_data: RoomAccountData { events: Vec::new() }, - timeline: Timeline { - limited: false, - prev_batch: Some(next_batch_string.clone()), - events: Vec::new(), - }, - state: State { - events: vec![event.to_sync_state_event()], - }, - }, - ); - - continue; - } - - let mut left_state_events = Vec::new(); - - let since_shortstatehash = services() - .rooms - .user - .get_token_shortstatehash(&room_id, since)?; - - let since_state_ids = match since_shortstatehash { - Some(s) => services().rooms.state_accessor.state_full_ids(s).await?, - None => HashMap::new(), - }; - - let left_event_id = match services().rooms.state_accessor.room_state_get_id( - &room_id, - &StateEventType::RoomMember, - sender_user.as_str(), - )? { - Some(e) => e, - None => { - error!("Left room but no left state event"); - continue; - } - }; - - let left_shortstatehash = match services() - .rooms - .state_accessor - .pdu_shortstatehash(&left_event_id)? 
- { - Some(s) => s, - None => { - error!("Leave event has no state"); - continue; - } - }; - - let mut left_state_ids = services() - .rooms - .state_accessor - .state_full_ids(left_shortstatehash) - .await?; - - let leave_shortstatekey = services() - .rooms - .short - .get_or_create_shortstatekey(&StateEventType::RoomMember, sender_user.as_str())?; - - left_state_ids.insert(leave_shortstatekey, left_event_id); - - let mut i = 0; - for (key, id) in left_state_ids { - if full_state || since_state_ids.get(&key) != Some(&id) { - let (event_type, state_key) = - services().rooms.short.get_statekey_from_short(key)?; - - if !lazy_load_enabled - || event_type != StateEventType::RoomMember - || full_state - // TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565 - || *sender_user == state_key - { - let pdu = match services().rooms.timeline.get_pdu(&id)? { - Some(pdu) => pdu, - None => { - error!("Pdu in state not found: {}", id); - continue; - } - }; - - left_state_events.push(pdu.to_sync_state_event()); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - } - } - - left_rooms.insert( - room_id.clone(), - LeftRoom { - account_data: RoomAccountData { events: Vec::new() }, - timeline: Timeline { - limited: false, - prev_batch: Some(next_batch_string.clone()), - events: Vec::new(), - }, - state: State { - events: left_state_events, - }, - }, - ); - } - - let mut invited_rooms = BTreeMap::new(); - let all_invited_rooms: Vec<_> = services() - .rooms - .state_cache - .rooms_invited(&sender_user) - .collect(); - for result in all_invited_rooms { - let (room_id, invite_state_events) = result?; - - { - // Get and drop the lock to wait for remaining operations to finish - let mutex_insert = Arc::clone( - services() - .globals - .roomid_mutex_insert - .write() - .await - .entry(room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().await; - drop(insert_lock); - } - - let invite_count 
= services() - .rooms - .state_cache - .get_invite_count(&room_id, &sender_user)?; - - // Invited before last sync - if Some(since) >= invite_count { - continue; - } - - invited_rooms.insert( - room_id.clone(), - InvitedRoom { - invite_state: InviteState { - events: invite_state_events, - }, - }, - ); - } - - for user_id in left_encrypted_users { - let dont_share_encrypted_room = services() - .rooms - .user - .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])? - .filter_map(|r| r.ok()) - .filter_map(|other_room_id| { - Some( - services() - .rooms - .state_accessor - .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") - .ok()? - .is_some(), - ) - }) - .all(|encrypted| !encrypted); - // If the user doesn't share an encrypted room with the target anymore, we need to tell - // them - if dont_share_encrypted_room { - device_list_left.insert(user_id); - } - } - - // Remove all to-device events the device received *last time* - services() - .users - .remove_to_device_events(&sender_user, &sender_device, since)?; - - let response = sync_events::v3::Response { - next_batch: next_batch_string, - rooms: Rooms { - leave: left_rooms, - join: joined_rooms, - invite: invited_rooms, - knock: BTreeMap::new(), // TODO - }, - presence: Presence { - events: presence_updates - .into_values() - .map(|v| Raw::new(&v).expect("PresenceEvent always serializes successfully")) - .collect(), - }, - account_data: GlobalAccountData { - events: services() - .account_data - .changes_since(None, &sender_user, since)? 
- .into_iter() - .filter_map(|(_, v)| { - serde_json::from_str(v.json().get()) - .map_err(|_| Error::bad_database("Invalid account event in database.")) - .ok() - }) - .collect(), - }, - device_lists: DeviceLists { - changed: device_list_updates.into_iter().collect(), - left: device_list_left.into_iter().collect(), - }, - device_one_time_keys_count: services() - .users - .count_one_time_keys(&sender_user, &sender_device)?, - to_device: ToDevice { - events: services() - .users - .get_to_device_events(&sender_user, &sender_device)?, - }, - // Fallback keys are not yet supported - device_unused_fallback_key_types: None, - }; - - // TODO: Retry the endpoint instead of returning (waiting for #118) - if !full_state - && response.rooms.is_empty() - && response.presence.is_empty() - && response.account_data.is_empty() - && response.device_lists.is_empty() - && response.to_device.is_empty() - { - // Hang a few seconds so requests are not spammed - // Stop hanging if new info arrives - let mut duration = body.timeout.unwrap_or_default(); - if duration.as_secs() > 30 { - duration = Duration::from_secs(30); - } - let _ = tokio::time::timeout(duration, watcher).await; - Ok((response, false)) - } else { - Ok((response, since != next_batch)) // Only cache if we made progress - } -} - -#[allow(clippy::too_many_arguments)] -async fn load_joined_room( - sender_user: &UserId, - sender_device: &DeviceId, - room_id: &RoomId, - since: u64, - sincecount: PduCount, - next_batch: u64, - next_batchcount: PduCount, - lazy_load_enabled: bool, - lazy_load_send_redundant: bool, - full_state: bool, - device_list_updates: &mut HashSet, - left_encrypted_users: &mut HashSet, -) -> Result { - { - // Get and drop the lock to wait for remaining operations to finish - // This will make sure the we have all events until next_batch - let mutex_insert = Arc::clone( - services() - .globals - .roomid_mutex_insert - .write() - .await - .entry(room_id.to_owned()) - .or_default(), - ); - let insert_lock = 
mutex_insert.lock().await; - drop(insert_lock); - } - - let (timeline_pdus, limited) = load_timeline(sender_user, room_id, sincecount, 10)?; - - let send_notification_counts = !timeline_pdus.is_empty() - || services() - .rooms - .user - .last_notification_read(sender_user, room_id)? - > since; - - let mut timeline_users = HashSet::new(); - for (_, event) in &timeline_pdus { - timeline_users.insert(event.sender.as_str().to_owned()); - } - - services() - .rooms - .lazy_loading - .lazy_load_confirm_delivery(sender_user, sender_device, room_id, sincecount) - .await?; - - // Database queries: - - let current_shortstatehash = - if let Some(s) = services().rooms.state.get_room_shortstatehash(room_id)? { - s - } else { - error!("Room {} has no state", room_id); - return Err(Error::BadDatabase("Room has no state")); - }; - - let since_shortstatehash = services() - .rooms - .user - .get_token_shortstatehash(room_id, since)?; - - let (heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events) = - if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) { - // No state changes - (Vec::new(), None, None, false, Vec::new()) - } else { - // Calculates joined_member_count, invited_member_count and heroes - let calculate_counts = || { - let joined_member_count = services() - .rooms - .state_cache - .room_joined_count(room_id)? - .unwrap_or(0); - let invited_member_count = services() - .rooms - .state_cache - .room_invited_count(room_id)? - .unwrap_or(0); - - // Recalculate heroes (first 5 members) - let mut heroes = Vec::new(); - - if joined_member_count + invited_member_count <= 5 { - // Go through all PDUs and for each member event, check if the user is still joined or - // invited until we have 5 or we reach the end - - for hero in services() - .rooms - .timeline - .all_pdus(sender_user, room_id)? 
- .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus - .filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember) - .map(|(_, pdu)| { - let content: RoomMemberEventContent = - serde_json::from_str(pdu.content.get()).map_err(|_| { - Error::bad_database("Invalid member event in database.") - })?; - - if let Some(state_key) = &pdu.state_key { - let user_id = UserId::parse(state_key.clone()).map_err(|_| { - Error::bad_database("Invalid UserId in member PDU.") - })?; - - // The membership was and still is invite or join - if matches!( - content.membership, - MembershipState::Join | MembershipState::Invite - ) && (services() - .rooms - .state_cache - .is_joined(&user_id, room_id)? - || services() - .rooms - .state_cache - .is_invited(&user_id, room_id)?) - { - Ok::<_, Error>(Some(state_key.clone())) - } else { - Ok(None) - } - } else { - Ok(None) - } - }) - // Filter out buggy users - .filter_map(|u| u.ok()) - // Filter for possible heroes - .flatten() - { - if heroes.contains(&hero) || hero == sender_user.as_str() { - continue; - } - - heroes.push(hero); - } - } - - Ok::<_, Error>(( - Some(joined_member_count), - Some(invited_member_count), - heroes, - )) - }; - - let since_sender_member: Option = since_shortstatehash - .and_then(|shortstatehash| { - services() - .rooms - .state_accessor - .state_get( - shortstatehash, - &StateEventType::RoomMember, - sender_user.as_str(), - ) - .transpose() - }) - .transpose()? 
- .and_then(|pdu| { - serde_json::from_str(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid PDU in database.")) - .ok() - }); - - let joined_since_last_sync = since_sender_member - .map_or(true, |member| member.membership != MembershipState::Join); - - if since_shortstatehash.is_none() || joined_since_last_sync { - // Probably since = 0, we will do an initial sync - - let (joined_member_count, invited_member_count, heroes) = calculate_counts()?; - - let current_state_ids = services() - .rooms - .state_accessor - .state_full_ids(current_shortstatehash) - .await?; - - let mut state_events = Vec::new(); - let mut lazy_loaded = HashSet::new(); - - let mut i = 0; - for (shortstatekey, id) in current_state_ids { - let (event_type, state_key) = services() - .rooms - .short - .get_statekey_from_short(shortstatekey)?; - - if event_type != StateEventType::RoomMember { - let pdu = match services().rooms.timeline.get_pdu(&id)? { - Some(pdu) => pdu, - None => { - error!("Pdu in state not found: {}", id); - continue; - } - }; - state_events.push(pdu); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } else if !lazy_load_enabled - || full_state - || timeline_users.contains(&state_key) - // TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565 - || *sender_user == state_key - { - let pdu = match services().rooms.timeline.get_pdu(&id)? 
{ - Some(pdu) => pdu, - None => { - error!("Pdu in state not found: {}", id); - continue; - } - }; - - // This check is in case a bad user ID made it into the database - if let Ok(uid) = UserId::parse(&state_key) { - lazy_loaded.insert(uid); - } - state_events.push(pdu); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - } - - // Reset lazy loading because this is an initial sync - services().rooms.lazy_loading.lazy_load_reset( - sender_user, - sender_device, - room_id, - )?; - - // The state_events above should contain all timeline_users, let's mark them as lazy - // loaded. - services() - .rooms - .lazy_loading - .lazy_load_mark_sent( - sender_user, - sender_device, - room_id, - lazy_loaded, - next_batchcount, - ) - .await; - - ( - heroes, - joined_member_count, - invited_member_count, - true, - state_events, - ) - } else { - // Incremental /sync - let since_shortstatehash = since_shortstatehash.unwrap(); - - let mut state_events = Vec::new(); - let mut lazy_loaded = HashSet::new(); - - if since_shortstatehash != current_shortstatehash { - let current_state_ids = services() - .rooms - .state_accessor - .state_full_ids(current_shortstatehash) - .await?; - let since_state_ids = services() - .rooms - .state_accessor - .state_full_ids(since_shortstatehash) - .await?; - - for (key, id) in current_state_ids { - if full_state || since_state_ids.get(&key) != Some(&id) { - let pdu = match services().rooms.timeline.get_pdu(&id)? 
{ - Some(pdu) => pdu, - None => { - error!("Pdu in state not found: {}", id); - continue; - } - }; - - if pdu.kind == TimelineEventType::RoomMember { - match UserId::parse( - pdu.state_key - .as_ref() - .expect("State event has state key") - .clone(), - ) { - Ok(state_key_userid) => { - lazy_loaded.insert(state_key_userid); - } - Err(e) => error!("Invalid state key for member event: {}", e), - } - } - - state_events.push(pdu); - tokio::task::yield_now().await; - } - } - } - - for (_, event) in &timeline_pdus { - if lazy_loaded.contains(&event.sender) { - continue; - } - - if !services().rooms.lazy_loading.lazy_load_was_sent_before( - sender_user, - sender_device, - room_id, - &event.sender, - )? || lazy_load_send_redundant - { - if let Some(member_event) = services().rooms.state_accessor.room_state_get( - room_id, - &StateEventType::RoomMember, - event.sender.as_str(), - )? { - lazy_loaded.insert(event.sender.clone()); - state_events.push(member_event); - } - } - } - - services() - .rooms - .lazy_loading - .lazy_load_mark_sent( - sender_user, - sender_device, - room_id, - lazy_loaded, - next_batchcount, - ) - .await; - - let encrypted_room = services() - .rooms - .state_accessor - .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? 
- .is_some(); - - let since_encryption = services().rooms.state_accessor.state_get( - since_shortstatehash, - &StateEventType::RoomEncryption, - "", - )?; - - // Calculations: - let new_encrypted_room = encrypted_room && since_encryption.is_none(); - - let send_member_count = state_events - .iter() - .any(|event| event.kind == TimelineEventType::RoomMember); - - if encrypted_room { - for state_event in &state_events { - if state_event.kind != TimelineEventType::RoomMember { - continue; - } - - if let Some(state_key) = &state_event.state_key { - let user_id = UserId::parse(state_key.clone()).map_err(|_| { - Error::bad_database("Invalid UserId in member PDU.") - })?; - - if user_id == sender_user { - continue; - } - - let new_membership = serde_json::from_str::( - state_event.content.get(), - ) - .map_err(|_| Error::bad_database("Invalid PDU in database."))? - .membership; - - match new_membership { - MembershipState::Join => { - // A new user joined an encrypted room - if !share_encrypted_room(sender_user, &user_id, room_id)? { - device_list_updates.insert(user_id); - } - } - MembershipState::Leave => { - // Write down users that have left encrypted rooms we are in - left_encrypted_users.insert(user_id); - } - _ => {} - } - } - } - } - - if joined_since_last_sync && encrypted_room || new_encrypted_room { - // If the user is in a new encrypted room, give them all joined users - device_list_updates.extend( - services() - .rooms - .state_cache - .room_members(room_id) - .flatten() - .filter(|user_id| { - // Don't send key updates from the sender to the sender - sender_user != user_id - }) - .filter(|user_id| { - // Only send keys if the sender doesn't share an encrypted room with the target already - !share_encrypted_room(sender_user, user_id, room_id) - .unwrap_or(false) - }), - ); - } - - let (joined_member_count, invited_member_count, heroes) = if send_member_count { - calculate_counts()? 
- } else { - (None, None, Vec::new()) - }; - - ( - heroes, - joined_member_count, - invited_member_count, - joined_since_last_sync, - state_events, - ) - } - }; - - // Look for device list updates in this room - device_list_updates.extend( - services() - .users - .keys_changed(room_id.as_ref(), since, None) - .filter_map(|r| r.ok()), - ); - - let notification_count = if send_notification_counts { - Some( - services() - .rooms - .user - .notification_count(sender_user, room_id)? - .try_into() - .expect("notification count can't go that high"), - ) - } else { - None - }; - - let highlight_count = if send_notification_counts { - Some( - services() - .rooms - .user - .highlight_count(sender_user, room_id)? - .try_into() - .expect("highlight count can't go that high"), - ) - } else { - None - }; - - let prev_batch = timeline_pdus - .first() - .map_or(Ok::<_, Error>(None), |(pdu_count, _)| { - Ok(Some(match pdu_count { - PduCount::Backfilled(_) => { - error!("timeline in backfill state?!"); - "0".to_owned() - } - PduCount::Normal(c) => c.to_string(), - })) - })?; - - let room_events: Vec<_> = timeline_pdus - .iter() - .map(|(_, pdu)| pdu.to_sync_room_event()) - .collect(); - - let mut edus: Vec<_> = services() - .rooms - .edus - .read_receipt - .readreceipts_since(room_id, since) - .filter_map(|r| r.ok()) // Filter out buggy events - .map(|(_, _, v)| v) - .collect(); - - if services() - .rooms - .edus - .typing - .last_typing_update(room_id) - .await? - > since - { - edus.push( - serde_json::from_str( - &serde_json::to_string(&services().rooms.edus.typing.typings_all(room_id).await?) 
- .expect("event is valid, we just created it"), - ) - .expect("event is valid, we just created it"), - ); - } - - // Save the state after this sync so we can send the correct state diff next sync - services().rooms.user.associate_token_shortstatehash( - room_id, - next_batch, - current_shortstatehash, - )?; - - Ok(JoinedRoom { - account_data: RoomAccountData { - events: services() - .account_data - .changes_since(Some(room_id), sender_user, since)? - .into_iter() - .filter_map(|(_, v)| { - serde_json::from_str(v.json().get()) - .map_err(|_| Error::bad_database("Invalid account event in database.")) - .ok() - }) - .collect(), - }, - summary: RoomSummary { - heroes, - joined_member_count: joined_member_count.map(|n| (n as u32).into()), - invited_member_count: invited_member_count.map(|n| (n as u32).into()), - }, - unread_notifications: UnreadNotificationsCount { - highlight_count, - notification_count, - }, - timeline: Timeline { - limited: limited || joined_since_last_sync, - prev_batch, - events: room_events, - }, - state: State { - events: state_events - .iter() - .map(|pdu| pdu.to_sync_state_event()) - .collect(), - }, - ephemeral: Ephemeral { events: edus }, - unread_thread_notifications: BTreeMap::new(), - }) -} +pub(crate) mod v3; fn load_timeline( sender_user: &UserId, @@ -1163,36 +15,34 @@ fn load_timeline( ) -> Result<(Vec<(PduCount, PduEvent)>, bool), Error> { let timeline_pdus; let limited; - if services() - .rooms - .timeline - .last_timeline_count(sender_user, room_id)? + if services().rooms.timeline.last_timeline_count(sender_user, room_id)? > roomsincecount { let mut non_timeline_pdus = services() .rooms .timeline - .pdus_until(sender_user, room_id, PduCount::max())? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); + .pdus_until(sender_user, room_id, PduCount::MAX)? 
+ .filter_map(|x| match x { + Ok(x) => Some(x), + Err(error) => { + error!(%error, "Bad PDU in pdus_since"); + None } - r.ok() }) .take_while(|(pducount, _)| pducount > &roomsincecount); // Take the last events for the timeline timeline_pdus = non_timeline_pdus .by_ref() - .take(limit as usize) + .take(limit.try_into().expect("limit should fit in usize")) .collect::>() .into_iter() .rev() .collect::>(); - // They /sync response doesn't always return all messages, so we say the output is - // limited unless there are events in non_timeline_pdus + // They /sync response doesn't always return all messages, so we say the + // output is limited unless there are events in + // non_timeline_pdus limited = non_timeline_pdus.next().is_some(); } else { timeline_pdus = Vec::new(); @@ -1210,592 +60,21 @@ fn share_encrypted_room( .rooms .user .get_shared_rooms(vec![sender_user.to_owned(), user_id.to_owned()])? - .filter_map(|r| r.ok()) + .filter_map(Result::ok) .filter(|room_id| room_id != ignore_room) .filter_map(|other_room_id| { Some( services() .rooms .state_accessor - .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") + .room_state_get( + &other_room_id, + &StateEventType::RoomEncryption, + "", + ) .ok()? 
.is_some(), ) }) .any(|encrypted| encrypted)) } - -pub async fn sync_events_v4_route( - body: Ruma, -) -> Result> { - let sender_user = body.sender_user.expect("user is authenticated"); - let sender_device = body.sender_device.expect("user is authenticated"); - let mut body = body.body; - // Setup watchers, so if there's no response, we can wait for them - let watcher = services().globals.watch(&sender_user, &sender_device); - - let next_batch = services().globals.next_count()?; - - let globalsince = body - .pos - .as_ref() - .and_then(|string| string.parse().ok()) - .unwrap_or(0); - - if globalsince == 0 { - if let Some(conn_id) = &body.conn_id { - services().users.forget_sync_request_connection( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - ) - } - } - - // Get sticky parameters from cache - let known_rooms = services().users.update_sync_request_with_cache( - sender_user.clone(), - sender_device.clone(), - &mut body, - ); - - let all_joined_rooms = services() - .rooms - .state_cache - .rooms_joined(&sender_user) - .filter_map(|r| r.ok()) - .collect::>(); - - if body.extensions.to_device.enabled.unwrap_or(false) { - services() - .users - .remove_to_device_events(&sender_user, &sender_device, globalsince)?; - } - - let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in - let mut device_list_changes = HashSet::new(); - let mut device_list_left = HashSet::new(); - - if body.extensions.e2ee.enabled.unwrap_or(false) { - // Look for device list updates of this account - device_list_changes.extend( - services() - .users - .keys_changed(sender_user.as_ref(), globalsince, None) - .filter_map(|r| r.ok()), - ); - - for room_id in &all_joined_rooms { - let current_shortstatehash = - if let Some(s) = services().rooms.state.get_room_shortstatehash(room_id)? 
{ - s - } else { - error!("Room {} has no state", room_id); - continue; - }; - - let since_shortstatehash = services() - .rooms - .user - .get_token_shortstatehash(room_id, globalsince)?; - - let since_sender_member: Option = since_shortstatehash - .and_then(|shortstatehash| { - services() - .rooms - .state_accessor - .state_get( - shortstatehash, - &StateEventType::RoomMember, - sender_user.as_str(), - ) - .transpose() - }) - .transpose()? - .and_then(|pdu| { - serde_json::from_str(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid PDU in database.")) - .ok() - }); - - let encrypted_room = services() - .rooms - .state_accessor - .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? - .is_some(); - - if let Some(since_shortstatehash) = since_shortstatehash { - // Skip if there are only timeline changes - if since_shortstatehash == current_shortstatehash { - continue; - } - - let since_encryption = services().rooms.state_accessor.state_get( - since_shortstatehash, - &StateEventType::RoomEncryption, - "", - )?; - - let joined_since_last_sync = since_sender_member - .map_or(true, |member| member.membership != MembershipState::Join); - - let new_encrypted_room = encrypted_room && since_encryption.is_none(); - if encrypted_room { - let current_state_ids = services() - .rooms - .state_accessor - .state_full_ids(current_shortstatehash) - .await?; - let since_state_ids = services() - .rooms - .state_accessor - .state_full_ids(since_shortstatehash) - .await?; - - for (key, id) in current_state_ids { - if since_state_ids.get(&key) != Some(&id) { - let pdu = match services().rooms.timeline.get_pdu(&id)? 
{ - Some(pdu) => pdu, - None => { - error!("Pdu in state not found: {}", id); - continue; - } - }; - if pdu.kind == TimelineEventType::RoomMember { - if let Some(state_key) = &pdu.state_key { - let user_id = - UserId::parse(state_key.clone()).map_err(|_| { - Error::bad_database("Invalid UserId in member PDU.") - })?; - - if user_id == sender_user { - continue; - } - - let new_membership = serde_json::from_str::< - RoomMemberEventContent, - >( - pdu.content.get() - ) - .map_err(|_| Error::bad_database("Invalid PDU in database."))? - .membership; - - match new_membership { - MembershipState::Join => { - // A new user joined an encrypted room - if !share_encrypted_room( - &sender_user, - &user_id, - room_id, - )? { - device_list_changes.insert(user_id); - } - } - MembershipState::Leave => { - // Write down users that have left encrypted rooms we are in - left_encrypted_users.insert(user_id); - } - _ => {} - } - } - } - } - } - if joined_since_last_sync || new_encrypted_room { - // If the user is in a new encrypted room, give them all joined users - device_list_changes.extend( - services() - .rooms - .state_cache - .room_members(room_id) - .flatten() - .filter(|user_id| { - // Don't send key updates from the sender to the sender - &sender_user != user_id - }) - .filter(|user_id| { - // Only send keys if the sender doesn't share an encrypted room with the target already - !share_encrypted_room(&sender_user, user_id, room_id) - .unwrap_or(false) - }), - ); - } - } - } - // Look for device list updates in this room - device_list_changes.extend( - services() - .users - .keys_changed(room_id.as_ref(), globalsince, None) - .filter_map(|r| r.ok()), - ); - } - for user_id in left_encrypted_users { - let dont_share_encrypted_room = services() - .rooms - .user - .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])? 
- .filter_map(|r| r.ok()) - .filter_map(|other_room_id| { - Some( - services() - .rooms - .state_accessor - .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") - .ok()? - .is_some(), - ) - }) - .all(|encrypted| !encrypted); - // If the user doesn't share an encrypted room with the target anymore, we need to tell - // them - if dont_share_encrypted_room { - device_list_left.insert(user_id); - } - } - } - - let mut lists = BTreeMap::new(); - let mut todo_rooms = BTreeMap::new(); // and required state - - for (list_id, list) in body.lists { - if list.filters.and_then(|f| f.is_invite).unwrap_or(false) { - continue; - } - - let mut new_known_rooms = BTreeSet::new(); - - lists.insert( - list_id.clone(), - sync_events::v4::SyncList { - ops: list - .ranges - .into_iter() - .map(|mut r| { - r.0 = - r.0.clamp(uint!(0), UInt::from(all_joined_rooms.len() as u32 - 1)); - r.1 = - r.1.clamp(r.0, UInt::from(all_joined_rooms.len() as u32 - 1)); - let room_ids = all_joined_rooms - [(u64::from(r.0) as usize)..=(u64::from(r.1) as usize)] - .to_vec(); - new_known_rooms.extend(room_ids.iter().cloned()); - for room_id in &room_ids { - let todo_room = todo_rooms.entry(room_id.clone()).or_insert(( - BTreeSet::new(), - 0, - u64::MAX, - )); - let limit = list - .room_details - .timeline_limit - .map_or(10, u64::from) - .min(100); - todo_room - .0 - .extend(list.room_details.required_state.iter().cloned()); - todo_room.1 = todo_room.1.max(limit); - // 0 means unknown because it got out of date - todo_room.2 = todo_room.2.min( - known_rooms - .get(&list_id) - .and_then(|k| k.get(room_id)) - .copied() - .unwrap_or(0), - ); - } - sync_events::v4::SyncOp { - op: SlidingOp::Sync, - range: Some(r), - index: None, - room_ids, - room_id: None, - } - }) - .collect(), - count: UInt::from(all_joined_rooms.len() as u32), - }, - ); - - if let Some(conn_id) = &body.conn_id { - services().users.update_sync_known_rooms( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - 
list_id, - new_known_rooms, - globalsince, - ); - } - } - - let mut known_subscription_rooms = BTreeSet::new(); - for (room_id, room) in &body.room_subscriptions { - if !services().rooms.metadata.exists(room_id)? { - continue; - } - let todo_room = todo_rooms - .entry(room_id.clone()) - .or_insert((BTreeSet::new(), 0, u64::MAX)); - let limit = room.timeline_limit.map_or(10, u64::from).min(100); - todo_room.0.extend(room.required_state.iter().cloned()); - todo_room.1 = todo_room.1.max(limit); - // 0 means unknown because it got out of date - todo_room.2 = todo_room.2.min( - known_rooms - .get("subscriptions") - .and_then(|k| k.get(room_id)) - .copied() - .unwrap_or(0), - ); - known_subscription_rooms.insert(room_id.clone()); - } - - for r in body.unsubscribe_rooms { - known_subscription_rooms.remove(&r); - body.room_subscriptions.remove(&r); - } - - if let Some(conn_id) = &body.conn_id { - services().users.update_sync_known_rooms( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - "subscriptions".to_owned(), - known_subscription_rooms, - globalsince, - ); - } - - if let Some(conn_id) = &body.conn_id { - services().users.update_sync_subscriptions( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - body.room_subscriptions, - ); - } - - let mut rooms = BTreeMap::new(); - for (room_id, (required_state_request, timeline_limit, roomsince)) in &todo_rooms { - let roomsincecount = PduCount::Normal(*roomsince); - - let (timeline_pdus, limited) = - load_timeline(&sender_user, room_id, roomsincecount, *timeline_limit)?; - - if roomsince != &0 && timeline_pdus.is_empty() { - continue; - } - - let prev_batch = timeline_pdus - .first() - .map_or(Ok::<_, Error>(None), |(pdu_count, _)| { - Ok(Some(match pdu_count { - PduCount::Backfilled(_) => { - error!("timeline in backfill state?!"); - "0".to_owned() - } - PduCount::Normal(c) => c.to_string(), - })) - })? 
- .or_else(|| { - if roomsince != &0 { - Some(roomsince.to_string()) - } else { - None - } - }); - - let room_events: Vec<_> = timeline_pdus - .iter() - .map(|(_, pdu)| pdu.to_sync_room_event()) - .collect(); - - let required_state = required_state_request - .iter() - .flat_map(|state| { - services() - .rooms - .state_accessor - .room_state_get(room_id, &state.0, &state.1) - .ok() - .flatten() - .map(|state| state.to_sync_state_event()) - }) - .collect(); - - // Heroes - let heroes = services() - .rooms - .state_cache - .room_members(room_id) - .filter_map(|r| r.ok()) - .filter(|member| member != &sender_user) - .flat_map(|member| { - services() - .rooms - .state_accessor - .get_member(room_id, &member) - .ok() - .flatten() - .map(|memberevent| { - ( - memberevent - .displayname - .unwrap_or_else(|| member.to_string()), - memberevent.avatar_url, - ) - }) - }) - .take(5) - .collect::>(); - let name = match &heroes[..] { - [] => None, - [only] => Some(only.0.clone()), - [firsts @ .., last] => Some( - firsts - .iter() - .map(|h| h.0.clone()) - .collect::>() - .join(", ") - + " and " - + &last.0, - ), - }; - - let avatar = if let [only] = &heroes[..] { - only.1.clone() - } else { - None - }; - - rooms.insert( - room_id.clone(), - sync_events::v4::SlidingSyncRoom { - name: services().rooms.state_accessor.get_name(room_id)?.or(name), - avatar: if let Some(avatar) = avatar { - JsOption::Some(avatar) - } else { - match services().rooms.state_accessor.get_avatar(room_id)? { - JsOption::Some(avatar) => JsOption::from_option(avatar.url), - JsOption::Null => JsOption::Null, - JsOption::Undefined => JsOption::Undefined, - } - }, - initial: Some(roomsince == &0), - is_dm: None, - invite_state: None, - unread_notifications: UnreadNotificationsCount { - highlight_count: Some( - services() - .rooms - .user - .highlight_count(&sender_user, room_id)? 
- .try_into() - .expect("notification count can't go that high"), - ), - notification_count: Some( - services() - .rooms - .user - .notification_count(&sender_user, room_id)? - .try_into() - .expect("notification count can't go that high"), - ), - }, - timeline: room_events, - required_state, - prev_batch, - limited, - joined_count: Some( - (services() - .rooms - .state_cache - .room_joined_count(room_id)? - .unwrap_or(0) as u32) - .into(), - ), - invited_count: Some( - (services() - .rooms - .state_cache - .room_invited_count(room_id)? - .unwrap_or(0) as u32) - .into(), - ), - num_live: None, // Count events in timeline greater than global sync counter - timestamp: None, - }, - ); - } - - if rooms - .iter() - .all(|(_, r)| r.timeline.is_empty() && r.required_state.is_empty()) - { - // Hang a few seconds so requests are not spammed - // Stop hanging if new info arrives - let mut duration = body.timeout.unwrap_or(Duration::from_secs(30)); - if duration.as_secs() > 30 { - duration = Duration::from_secs(30); - } - let _ = tokio::time::timeout(duration, watcher).await; - } - - Ok(sync_events::v4::Response { - initial: globalsince == 0, - txn_id: body.txn_id.clone(), - pos: next_batch.to_string(), - lists, - rooms, - extensions: sync_events::v4::Extensions { - to_device: if body.extensions.to_device.enabled.unwrap_or(false) { - Some(sync_events::v4::ToDevice { - events: services() - .users - .get_to_device_events(&sender_user, &sender_device)?, - next_batch: next_batch.to_string(), - }) - } else { - None - }, - e2ee: sync_events::v4::E2EE { - device_lists: DeviceLists { - changed: device_list_changes.into_iter().collect(), - left: device_list_left.into_iter().collect(), - }, - device_one_time_keys_count: services() - .users - .count_one_time_keys(&sender_user, &sender_device)?, - // Fallback keys are not yet supported - device_unused_fallback_key_types: None, - }, - account_data: sync_events::v4::AccountData { - global: if 
body.extensions.account_data.enabled.unwrap_or(false) { - services() - .account_data - .changes_since(None, &sender_user, globalsince)? - .into_iter() - .filter_map(|(_, v)| { - serde_json::from_str(v.json().get()) - .map_err(|_| { - Error::bad_database("Invalid account event in database.") - }) - .ok() - }) - .collect() - } else { - Vec::new() - }, - rooms: BTreeMap::new(), - }, - receipts: sync_events::v4::Receipts { - rooms: BTreeMap::new(), - }, - typing: sync_events::v4::Typing { - rooms: BTreeMap::new(), - }, - }, - delta_token: None, - }) -} diff --git a/src/api/client_server/sync/v3.rs b/src/api/client_server/sync/v3.rs new file mode 100644 index 00000000..3108ff9c --- /dev/null +++ b/src/api/client_server/sync/v3.rs @@ -0,0 +1,1133 @@ +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + time::Duration, +}; + +use ruma::{ + api::client::{ + filter::{FilterDefinition, LazyLoadOptions}, + sync::sync_events::{ + self, + v3::{ + Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, + JoinedRoom, LeftRoom, Presence, RoomAccountData, RoomSummary, + Rooms, State, Timeline, ToDevice, + }, + DeviceLists, UnreadNotificationsCount, + }, + uiaa::UiaaResponse, + }, + events::{ + room::member::{MembershipState, RoomMemberEventContent}, + StateEventType, TimelineEventType, + }, + uint, DeviceId, EventId, OwnedRoomId, OwnedUserId, RoomId, UInt, UserId, +}; +use tracing::{debug, error, field}; + +use super::{load_timeline, share_encrypted_room}; +use crate::{ + service::{account_data, pdu::EventHash, rooms::timeline::PduCount}, + services, utils, Ar, Error, PduEvent, Ra, Result, +}; + +struct SyncContext<'a> { + sender_user: &'a UserId, + sender_device: &'a DeviceId, + + next_batch: u64, + next_batch_string: String, + next_batchcount: PduCount, + + since: u64, + sincecount: PduCount, + + lazy_load_enabled: bool, + lazy_load_send_redundant: bool, + + full_state: bool, +} + +/// # `GET /_matrix/client/r0/sync` +/// +/// Synchronize the client's state with the 
latest state on the server. +/// +/// - This endpoint takes a `since` parameter which should be the `next_batch` +/// value from a previous request for incremental syncs. +/// +/// Calling this endpoint without a `since` parameter returns: +/// - Some of the most recent events of each timeline +/// - Notification counts for each room +/// - Joined and invited member counts, heroes +/// - All state events +/// +/// Calling this endpoint with a `since` parameter from a previous `next_batch` +/// returns: For joined rooms: +/// - Some of the most recent events of each timeline that happened after +/// `since` +/// - If user joined the room after `since`: All state events (unless lazy +/// loading is activated) and all device list updates in that room +/// - If the user was already in the room: A list of all events that are in the +/// state now, but were not in the state at `since` +/// - If the state we send contains a member event: Joined and invited member +/// counts, heroes +/// - Device list updates that happened after `since` +/// - If there are events in the timeline we send or the user send updated their +/// read mark: Notification counts +/// - EDUs that are active now (read receipts, typing updates, presence) +/// - TODO: Allow multiple sync streams to support Pantalaimon +/// +/// For invited rooms: +/// - If the user was invited after `since`: A subset of the state of the room +/// at the point of the invite +/// +/// For left rooms: +/// - If the user left after `since`: `prev_batch` token, empty state (TODO: +/// subset of the state at the point of the leave) +#[allow(clippy::too_many_lines)] +#[tracing::instrument( + skip_all, + fields( + sender_user, + sender_device, + next_batch, + since, + lazy_load_enabled, + lazy_load_send_redundant, + ) +)] +pub(crate) async fn sync_events_route( + body: Ar, +) -> Result, Ra> { + let current_span = tracing::Span::current(); + + let sender_user = body.sender_user.expect("user is authenticated"); + 
current_span.record("sender_user", field::display(&sender_user)); + let sender_device = body.sender_device.expect("user is authenticated"); + current_span.record("sender_device", field::display(&sender_device)); + let body = body.body; + + // Setup watchers, so if there's no response, we can wait for them + let watcher = services().globals.watch(&sender_user, &sender_device); + + let ctx = { + let next_batch = services().globals.current_count()?; + current_span.record("next_batch", next_batch); + let next_batchcount = PduCount::Normal(next_batch); + let next_batch_string = next_batch.to_string(); + + // Load filter + let filter = match body.filter { + None => FilterDefinition::default(), + Some(Filter::FilterDefinition(filter)) => filter, + Some(Filter::FilterId(filter_id)) => services() + .users + .get_filter(&sender_user, &filter_id)? + .unwrap_or_default(), + }; + + let (lazy_load_enabled, lazy_load_send_redundant) = + match filter.room.state.lazy_load_options { + LazyLoadOptions::Enabled { + include_redundant_members: redundant, + } => (true, redundant), + LazyLoadOptions::Disabled => (false, false), + }; + current_span.record("lazy_load_enabled", lazy_load_enabled); + current_span + .record("lazy_load_send_redundant", lazy_load_send_redundant); + + let full_state = body.full_state; + + let since = body + .since + .as_ref() + .and_then(|string| string.parse().ok()) + .unwrap_or(0); + current_span.record("since", since); + let sincecount = PduCount::Normal(since); + + SyncContext { + sender_user: &sender_user, + sender_device: &sender_device, + next_batch, + next_batch_string, + next_batchcount, + since, + sincecount, + lazy_load_enabled, + lazy_load_send_redundant, + full_state, + } + }; + + // Users that have left any encrypted rooms the sender was in + let mut left_encrypted_users = HashSet::new(); + let mut device_list_updates = HashSet::new(); + let mut device_list_left = HashSet::new(); + + // Look for device list updates of this account + 
device_list_updates.extend( + services() + .users + .keys_changed(ctx.sender_user.as_ref(), ctx.since, None) + .filter_map(Result::ok), + ); + + let joined_rooms = collect_joined_rooms( + &ctx, + &mut device_list_updates, + &mut left_encrypted_users, + ) + .await?; + let left_rooms = collect_left_rooms(&ctx).await?; + let invited_rooms = collect_invited_rooms(&ctx).await?; + + for user_id in left_encrypted_users { + let dont_share_encrypted_room = services() + .rooms + .user + .get_shared_rooms(vec![ + ctx.sender_user.to_owned(), + user_id.clone(), + ])? + .filter_map(Result::ok) + .filter_map(|other_room_id| { + Some( + services() + .rooms + .state_accessor + .room_state_get( + &other_room_id, + &StateEventType::RoomEncryption, + "", + ) + .ok()? + .is_some(), + ) + }) + .all(|encrypted| !encrypted); + // If the user doesn't share an encrypted room with the target anymore, + // we need to tell them + if dont_share_encrypted_room { + device_list_left.insert(user_id); + } + } + + // Remove all to-device events the device received *last time* + services().users.remove_to_device_events( + ctx.sender_user, + ctx.sender_device, + ctx.since, + )?; + + let response = sync_events::v3::Response { + next_batch: ctx.next_batch_string, + rooms: Rooms { + leave: left_rooms, + join: joined_rooms, + invite: invited_rooms, + // TODO + knock: BTreeMap::new(), + }, + presence: Presence::default(), + account_data: GlobalAccountData { + events: services() + .account_data + .global_changes_since(ctx.sender_user, ctx.since)? 
+ .into_iter() + .map(|(event_type, content)| { + account_data::raw_global_event_from_parts( + &event_type, + &content, + ) + }) + .collect(), + }, + device_lists: DeviceLists { + changed: device_list_updates.into_iter().collect(), + left: device_list_left.into_iter().collect(), + }, + device_one_time_keys_count: services() + .users + .count_one_time_keys(ctx.sender_user, ctx.sender_device)?, + to_device: ToDevice { + events: services() + .users + .get_to_device_events(ctx.sender_user, ctx.sender_device)?, + }, + // Fallback keys are not yet supported + device_unused_fallback_key_types: None, + }; + + // TODO: Retry the endpoint instead of returning (waiting for #118) + if !ctx.full_state + && response.rooms.is_empty() + && response.presence.is_empty() + && response.account_data.is_empty() + && response.device_lists.is_empty() + && response.to_device.is_empty() + { + // Hang a few seconds so requests are not spammed + // Stop hanging if new info arrives + let mut duration = body.timeout.unwrap_or_default(); + if duration.as_secs() > 30 { + duration = Duration::from_secs(30); + } + match tokio::time::timeout(duration, watcher).await { + Ok(x) => x.expect("watcher should succeed"), + Err(error) => debug!(%error, "Timed out"), + }; + } + Ok(Ra(response)) +} + +#[tracing::instrument(skip_all)] +async fn collect_joined_rooms( + ctx: &SyncContext<'_>, + device_list_updates: &mut HashSet, + left_encrypted_users: &mut HashSet, +) -> Result> { + let mut joined_rooms = BTreeMap::new(); + let all_joined_rooms = services() + .rooms + .state_cache + .rooms_joined(ctx.sender_user) + .collect::>(); + for room_id in all_joined_rooms { + let room_id = room_id?; + if let Ok(joined_room) = load_joined_room( + ctx, + &room_id, + device_list_updates, + left_encrypted_users, + ) + .await + { + if !joined_room.is_empty() { + joined_rooms.insert(room_id.clone(), joined_room); + } + } + } + + Ok(joined_rooms) +} + +#[tracing::instrument(skip_all, fields(room_id = %room_id))] 
+#[allow(clippy::too_many_arguments, clippy::too_many_lines)] +async fn load_joined_room( + ctx: &SyncContext<'_>, + room_id: &RoomId, + device_list_updates: &mut HashSet, + left_encrypted_users: &mut HashSet, +) -> Result { + { + // Get and drop the lock to wait for remaining operations to finish + // This will make sure the we have all events until next_batch + let room_token = services() + .globals + .roomid_mutex_insert + .lock_key(room_id.to_owned()) + .await; + drop(room_token); + } + + let (timeline_pdus, limited) = + load_timeline(ctx.sender_user, room_id, ctx.sincecount, 10)?; + + let send_notification_counts = !timeline_pdus.is_empty() + || services() + .rooms + .user + .last_notification_read(ctx.sender_user, room_id)? + > ctx.since; + + let mut timeline_users = HashSet::new(); + for (_, event) in &timeline_pdus { + timeline_users.insert(event.sender.as_str().to_owned()); + } + + services() + .rooms + .lazy_loading + .lazy_load_confirm_delivery( + ctx.sender_user, + ctx.sender_device, + room_id, + ctx.sincecount, + ) + .await?; + + // Database queries: + + let Some(current_shortstatehash) = + services().rooms.state.get_room_shortstatehash(room_id)? + else { + error!("Room has no state"); + return Err(Error::BadDatabase("Room has no state")); + }; + + let since_shortstatehash = + services().rooms.user.get_token_shortstatehash(room_id, ctx.since)?; + + let ( + heroes, + joined_member_count, + invited_member_count, + joined_since_last_sync, + state_events, + ) = if timeline_pdus.is_empty() + && since_shortstatehash == Some(current_shortstatehash) + { + // No state changes + (Vec::new(), None, None, false, Vec::new()) + } else { + // Calculates joined_member_count, invited_member_count and heroes + let calculate_counts = || { + let joined_member_count = services() + .rooms + .state_cache + .room_joined_count(room_id)? + .unwrap_or(0); + let invited_member_count = services() + .rooms + .state_cache + .room_invited_count(room_id)? 
+ .unwrap_or(0); + + // Recalculate heroes (first 5 members) + let mut heroes = Vec::new(); + + if joined_member_count + invited_member_count <= 5 { + // Go through all PDUs and for each member event, check if the + // user is still joined or invited until we have + // 5 or we reach the end + + for hero in services() + .rooms + .timeline + .all_pdus(ctx.sender_user, room_id)? + .filter_map(Result::ok) + .filter(|(_, pdu)| { + pdu.kind == TimelineEventType::RoomMember + }) + .map(|(_, pdu)| { + let content: RoomMemberEventContent = + serde_json::from_str(pdu.content.get()).map_err( + |_| { + Error::bad_database( + "Invalid member event in database.", + ) + }, + )?; + + if let Some(state_key) = &pdu.state_key { + let user_id = UserId::parse(state_key.clone()) + .map_err(|_| { + Error::bad_database( + "Invalid UserId in member PDU.", + ) + })?; + + // The membership was and still is invite or join + if matches!( + content.membership, + MembershipState::Join | MembershipState::Invite + ) && (services() + .rooms + .state_cache + .is_joined(&user_id, room_id)? + || services() + .rooms + .state_cache + .is_invited(&user_id, room_id)?) + { + Ok::<_, Error>(Some(state_key.parse().expect( + "`state_key` should be a valid user ID", + ))) + } else { + Ok(None) + } + } else { + Ok(None) + } + }) + .filter_map(Result::ok) + .flatten() + { + if heroes.contains(&hero) + || hero == ctx.sender_user.as_str() + { + continue; + } + + heroes.push(hero); + } + } + + Ok::<_, Error>(( + Some(joined_member_count), + Some(invited_member_count), + heroes, + )) + }; + + let since_sender_member: Option = + since_shortstatehash + .and_then(|shortstatehash| { + services() + .rooms + .state_accessor + .state_get( + shortstatehash, + &StateEventType::RoomMember, + ctx.sender_user.as_str(), + ) + .transpose() + }) + .transpose()? 
+ .and_then(|pdu| { + serde_json::from_str(pdu.content.get()) + .map_err(|_| { + Error::bad_database("Invalid PDU in database.") + }) + .ok() + }); + + let joined_since_last_sync = since_sender_member + .is_none_or(|member| member.membership != MembershipState::Join); + + if since_shortstatehash.is_none() || joined_since_last_sync { + // Probably since = 0, we will do an initial sync + + let (joined_member_count, invited_member_count, heroes) = + calculate_counts()?; + + let current_state_ids = services() + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .await?; + + let mut state_events = Vec::new(); + let mut lazy_loaded = HashSet::new(); + + let mut i = 0; + for (shortstatekey, event_id) in current_state_ids { + let (event_type, state_key) = services() + .rooms + .short + .get_statekey_from_short(shortstatekey)?; + + if event_type != StateEventType::RoomMember { + let Some(pdu) = + services().rooms.timeline.get_pdu(&event_id)? + else { + error!(%event_id, "Event in state not found"); + continue; + }; + state_events.push(pdu); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } else if !ctx.lazy_load_enabled + || ctx.full_state + || timeline_users.contains(&state_key) + // TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565 + || *ctx.sender_user == state_key + { + let Some(pdu) = + services().rooms.timeline.get_pdu(&event_id)? 
+ else { + error!(%event_id, "Event in state not found"); + continue; + }; + + // This check is in case a bad user ID made it into the + // database + if let Ok(uid) = UserId::parse(&state_key) { + lazy_loaded.insert(uid); + } + state_events.push(pdu); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } + } + + // Reset lazy loading because this is an initial sync + services().rooms.lazy_loading.lazy_load_reset( + ctx.sender_user, + ctx.sender_device, + room_id, + )?; + + // The state_events above should contain all timeline_users, let's + // mark them as lazy loaded. + services() + .rooms + .lazy_loading + .lazy_load_mark_sent( + ctx.sender_user, + ctx.sender_device, + room_id, + lazy_loaded, + ctx.next_batchcount, + ) + .await; + + ( + heroes, + joined_member_count, + invited_member_count, + true, + state_events, + ) + } else { + // Incremental /sync + let since_shortstatehash = since_shortstatehash.unwrap(); + + let mut delta_state_events = Vec::new(); + + if since_shortstatehash != current_shortstatehash { + let current_state_ids = services() + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .await?; + let since_state_ids = services() + .rooms + .state_accessor + .state_full_ids(since_shortstatehash) + .await?; + + for (key, event_id) in current_state_ids { + if ctx.full_state + || since_state_ids.get(&key) != Some(&event_id) + { + let Some(pdu) = + services().rooms.timeline.get_pdu(&event_id)? + else { + error!(%event_id, "Event in state not found"); + continue; + }; + + delta_state_events.push(pdu); + tokio::task::yield_now().await; + } + } + } + + let encrypted_room = services() + .rooms + .state_accessor + .state_get( + current_shortstatehash, + &StateEventType::RoomEncryption, + "", + )? 
+ .is_some(); + + let since_encryption = services().rooms.state_accessor.state_get( + since_shortstatehash, + &StateEventType::RoomEncryption, + "", + )?; + + // Calculations: + let new_encrypted_room = + encrypted_room && since_encryption.is_none(); + + let send_member_count = delta_state_events + .iter() + .any(|event| event.kind == TimelineEventType::RoomMember); + + if encrypted_room { + for state_event in &delta_state_events { + if state_event.kind != TimelineEventType::RoomMember { + continue; + } + + if let Some(state_key) = &state_event.state_key { + let user_id = UserId::parse(state_key.clone()) + .map_err(|_| { + Error::bad_database( + "Invalid UserId in member PDU.", + ) + })?; + + if user_id == ctx.sender_user { + continue; + } + + let new_membership = + serde_json::from_str::( + state_event.content.get(), + ) + .map_err(|_| { + Error::bad_database("Invalid PDU in database.") + })? + .membership; + + match new_membership { + MembershipState::Join => { + // A new user joined an encrypted room + if !share_encrypted_room( + ctx.sender_user, + &user_id, + room_id, + )? 
{ + device_list_updates.insert(user_id); + } + } + MembershipState::Leave => { + // Write down users that have left encrypted + // rooms we are in + left_encrypted_users.insert(user_id); + } + _ => {} + } + } + } + } + + if joined_since_last_sync && encrypted_room || new_encrypted_room { + // If the user is in a new encrypted room, give them all joined + // users + device_list_updates.extend( + services() + .rooms + .state_cache + .room_members(room_id) + .flatten() + .filter(|user_id| { + // Don't send key updates from the sender to the + // sender + ctx.sender_user != *user_id + }) + .filter(|user_id| { + // Only send keys if the sender doesn't share an + // encrypted room with the target already + !share_encrypted_room( + ctx.sender_user, + user_id, + room_id, + ) + .unwrap_or(false) + }), + ); + } + + let (joined_member_count, invited_member_count, heroes) = + if send_member_count { + calculate_counts()? + } else { + (None, None, Vec::new()) + }; + + let mut state_events = delta_state_events; + let mut lazy_loaded = HashSet::new(); + + // Mark all member events we're returning as lazy-loaded + for pdu in &state_events { + if pdu.kind == TimelineEventType::RoomMember { + match UserId::parse( + pdu.state_key + .as_ref() + .expect("State event has state key") + .clone(), + ) { + Ok(state_key_userid) => { + lazy_loaded.insert(state_key_userid); + } + Err(error) => { + error!( + event_id = %pdu.event_id, + %error, + "Invalid state key for member event", + ); + } + } + } + } + + // Fetch contextual member state events for events from the + // timeline, and mark them as lazy-loaded as well. + for (_, event) in &timeline_pdus { + if lazy_loaded.contains(&event.sender) { + continue; + } + + if !services().rooms.lazy_loading.lazy_load_was_sent_before( + ctx.sender_user, + ctx.sender_device, + room_id, + &event.sender, + )? 
|| ctx.lazy_load_send_redundant + { + if let Some(member_event) = + services().rooms.state_accessor.room_state_get( + room_id, + &StateEventType::RoomMember, + event.sender.as_str(), + )? + { + lazy_loaded.insert(event.sender.clone()); + state_events.push(member_event); + } + } + } + + services() + .rooms + .lazy_loading + .lazy_load_mark_sent( + ctx.sender_user, + ctx.sender_device, + room_id, + lazy_loaded, + ctx.next_batchcount, + ) + .await; + + ( + heroes, + joined_member_count, + invited_member_count, + joined_since_last_sync, + state_events, + ) + } + }; + + // Look for device list updates in this room + device_list_updates.extend( + services() + .users + .keys_changed(room_id.as_ref(), ctx.since, None) + .filter_map(Result::ok), + ); + + let notification_count = send_notification_counts + .then(|| { + services().rooms.user.notification_count(ctx.sender_user, room_id) + }) + .transpose()? + .map(|x| x.try_into().expect("notification count can't go that high")); + + let highlight_count = send_notification_counts + .then(|| { + services().rooms.user.highlight_count(ctx.sender_user, room_id) + }) + .transpose()? + .map(|x| x.try_into().expect("highlight count can't go that high")); + + let prev_batch = timeline_pdus.first().map_or( + Ok::<_, Error>(None), + |(pdu_count, _)| { + Ok(Some(match pdu_count { + PduCount::Backfilled(_) => { + error!("Timeline in backfill state?!"); + "0".to_owned() + } + PduCount::Normal(c) => c.to_string(), + })) + }, + )?; + + let room_events: Vec<_> = + timeline_pdus.iter().map(|(_, pdu)| pdu.to_sync_room_event()).collect(); + + let mut edus: Vec<_> = services() + .rooms + .edus + .read_receipt + .readreceipts_since(room_id, ctx.since) + .filter_map(Result::ok) + .map(|(_, _, v)| v) + .collect(); + + if services().rooms.edus.typing.last_typing_update(room_id).await? 
+ > ctx.since + { + edus.push( + serde_json::from_str( + &serde_json::to_string( + &services().rooms.edus.typing.typings_all(room_id).await?, + ) + .expect("event is valid, we just created it"), + ) + .expect("event is valid, we just created it"), + ); + } + + // Save the state after this sync so we can send the correct state diff next + // sync + services().rooms.user.associate_token_shortstatehash( + room_id, + ctx.next_batch, + current_shortstatehash, + )?; + + Ok(JoinedRoom { + account_data: RoomAccountData { + events: services() + .account_data + .room_changes_since(ctx.sender_user, room_id, ctx.since)? + .into_iter() + .map(|(event_type, content)| { + account_data::raw_room_event_from_parts( + &event_type, + &content, + ) + }) + .collect(), + }, + summary: RoomSummary { + heroes, + joined_member_count: joined_member_count.map(UInt::new_saturating), + invited_member_count: invited_member_count + .map(UInt::new_saturating), + }, + unread_notifications: UnreadNotificationsCount { + highlight_count, + notification_count, + }, + timeline: Timeline { + limited: limited || joined_since_last_sync, + prev_batch, + events: room_events, + }, + state: State { + events: state_events + .iter() + .map(|pdu| pdu.to_sync_state_event()) + .collect(), + }, + ephemeral: Ephemeral { + events: edus, + }, + unread_thread_notifications: BTreeMap::new(), + }) +} + +#[allow(clippy::too_many_lines)] +#[tracing::instrument(skip_all)] +async fn collect_left_rooms( + ctx: &SyncContext<'_>, +) -> Result> { + let mut left_rooms = BTreeMap::new(); + let all_left_rooms: Vec<_> = + services().rooms.state_cache.rooms_left(ctx.sender_user).collect(); + for result in all_left_rooms { + let (room_id, _) = result?; + + { + // Get and drop the lock to wait for remaining operations to finish + let room_token = services() + .globals + .roomid_mutex_insert + .lock_key(room_id.clone()) + .await; + drop(room_token); + } + + let left_count = services() + .rooms + .state_cache + .get_left_count(&room_id, 
ctx.sender_user)?; + + // Left before last sync + if Some(ctx.since) >= left_count { + continue; + } + + if !services().rooms.metadata.exists(&room_id)? { + // This is just a rejected invite, not a room we know + let event = PduEvent { + event_id: EventId::new(services().globals.server_name()).into(), + sender: ctx.sender_user.to_owned(), + origin_server_ts: utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + kind: TimelineEventType::RoomMember, + content: serde_json::from_str(r#"{ "membership": "leave"}"#) + .unwrap(), + state_key: Some(ctx.sender_user.to_string()), + unsigned: None, + // The following keys are dropped on conversion + room_id: room_id.clone(), + prev_events: vec![], + depth: uint!(1), + auth_events: vec![], + redacts: None, + hashes: EventHash { + sha256: String::new(), + }, + signatures: None, + }; + + left_rooms.insert( + room_id, + LeftRoom { + account_data: RoomAccountData { + events: Vec::new(), + }, + timeline: Timeline { + limited: false, + prev_batch: Some(ctx.next_batch_string.clone()), + events: Vec::new(), + }, + state: State { + events: vec![event.to_sync_state_event()], + }, + }, + ); + + continue; + } + + let mut left_state_events = Vec::new(); + + let since_shortstatehash = services() + .rooms + .user + .get_token_shortstatehash(&room_id, ctx.since)?; + + let since_state_ids = match since_shortstatehash { + Some(s) => { + services().rooms.state_accessor.state_full_ids(s).await? + } + None => HashMap::new(), + }; + + let Some(left_event_id) = + services().rooms.state_accessor.room_state_get_id( + &room_id, + &StateEventType::RoomMember, + ctx.sender_user.as_str(), + )? + else { + error!("Left room but no left state event"); + continue; + }; + + let Some(left_shortstatehash) = services() + .rooms + .state_accessor + .pdu_shortstatehash(&left_event_id)? 
+ else { + error!("Leave event has no state"); + continue; + }; + + let mut left_state_ids = services() + .rooms + .state_accessor + .state_full_ids(left_shortstatehash) + .await?; + + let leave_shortstatekey = + services().rooms.short.get_or_create_shortstatekey( + &StateEventType::RoomMember, + ctx.sender_user.as_str(), + )?; + + left_state_ids.insert(leave_shortstatekey, left_event_id); + + let mut i = 0; + for (key, event_id) in left_state_ids { + if ctx.full_state || since_state_ids.get(&key) != Some(&event_id) { + let (event_type, state_key) = + services().rooms.short.get_statekey_from_short(key)?; + + if !ctx.lazy_load_enabled + || event_type != StateEventType::RoomMember + || ctx.full_state + // TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565 + || *ctx.sender_user == state_key + { + let Some(pdu) = + services().rooms.timeline.get_pdu(&event_id)? + else { + error!(%event_id, "Event in state not found"); + continue; + }; + + left_state_events.push(pdu.to_sync_state_event()); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } + } + } + + left_rooms.insert( + room_id.clone(), + LeftRoom { + account_data: RoomAccountData { + events: Vec::new(), + }, + timeline: Timeline { + limited: false, + prev_batch: Some(ctx.next_batch_string.clone()), + events: Vec::new(), + }, + state: State { + events: left_state_events, + }, + }, + ); + } + + Ok(left_rooms) +} + +#[tracing::instrument(skip_all)] +async fn collect_invited_rooms( + ctx: &SyncContext<'_>, +) -> Result> { + let mut invited_rooms = BTreeMap::new(); + let all_invited_rooms: Vec<_> = + services().rooms.state_cache.rooms_invited(ctx.sender_user).collect(); + for result in all_invited_rooms { + let (room_id, invite_state_events) = result?; + + { + // Get and drop the lock to wait for remaining operations to finish + let room_token = services() + .globals + .roomid_mutex_insert + .lock_key(room_id.clone()) + .await; + 
drop(room_token); + } + + let invite_count = services() + .rooms + .state_cache + .get_invite_count(&room_id, ctx.sender_user)?; + + // Invited before last sync + if Some(ctx.since) >= invite_count { + continue; + } + + invited_rooms.insert( + room_id.clone(), + InvitedRoom { + invite_state: InviteState { + events: invite_state_events, + }, + }, + ); + } + + Ok(invited_rooms) +} diff --git a/src/api/client_server/tag.rs b/src/api/client_server/tag.rs index 16f1600f..aec1a2f0 100644 --- a/src/api/client_server/tag.rs +++ b/src/api/client_server/tag.rs @@ -1,55 +1,50 @@ -use crate::{services, Error, Result, Ruma}; +use std::collections::BTreeMap; + use ruma::{ api::client::tag::{create_tag, delete_tag, get_tags}, - events::{ - tag::{TagEvent, TagEventContent}, - RoomAccountDataEventType, - }, + events::tag::TagEventContent, + serde::Raw, }; -use std::collections::BTreeMap; + +use crate::{services, Ar, Error, Ra, Result}; /// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}` /// /// Adds a tag to the room. /// /// - Inserts the tag into the tag event of the room account data. 
-pub async fn update_tag_route( - body: Ruma, -) -> Result { +pub(crate) async fn update_tag_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event = services().account_data.get( - Some(&body.room_id), - sender_user, - RoomAccountDataEventType::Tag, - )?; + let event = services() + .account_data + .get_room::(&body.room_id, sender_user)?; - let mut tags_event = event - .map(|e| { - serde_json::from_str(e.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db.")) - }) - .unwrap_or_else(|| { - Ok(TagEvent { - content: TagEventContent { - tags: BTreeMap::new(), - }, + let mut tags_event = event.map_or_else( + || { + Ok(TagEventContent { + tags: BTreeMap::new(), }) - })?; - - tags_event - .content - .tags - .insert(body.tag.clone().into(), body.tag_info.clone()); - - services().account_data.update( - Some(&body.room_id), - sender_user, - RoomAccountDataEventType::Tag, - &serde_json::to_value(tags_event).expect("to json value always works"), + }, + |e| { + e.deserialize().map_err(|_| { + Error::bad_database("Invalid account data event in db.") + }) + }, )?; - Ok(create_tag::v3::Response {}) + tags_event.tags.insert(body.tag.clone().into(), body.tag_info.clone()); + + services().account_data.update_room( + &body.room_id, + sender_user, + &Raw::new(&tags_event) + .expect("json event serialization should always suceed"), + )?; + + Ok(Ra(create_tag::v3::Response {})) } /// # `DELETE /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}` @@ -57,40 +52,38 @@ pub async fn update_tag_route( /// Deletes a tag from the room. /// /// - Removes the tag from the tag event of the room account data. 
-pub async fn delete_tag_route( - body: Ruma, -) -> Result { +pub(crate) async fn delete_tag_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event = services().account_data.get( - Some(&body.room_id), - sender_user, - RoomAccountDataEventType::Tag, - )?; + let event = services() + .account_data + .get_room::(&body.room_id, sender_user)?; - let mut tags_event = event - .map(|e| { - serde_json::from_str(e.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db.")) - }) - .unwrap_or_else(|| { - Ok(TagEvent { - content: TagEventContent { - tags: BTreeMap::new(), - }, + let mut tags_event = event.map_or_else( + || { + Ok(TagEventContent { + tags: BTreeMap::new(), }) - })?; - - tags_event.content.tags.remove(&body.tag.clone().into()); - - services().account_data.update( - Some(&body.room_id), - sender_user, - RoomAccountDataEventType::Tag, - &serde_json::to_value(tags_event).expect("to json value always works"), + }, + |e| { + e.deserialize().map_err(|_| { + Error::bad_database("Invalid account data event in db.") + }) + }, )?; - Ok(delete_tag::v3::Response {}) + tags_event.tags.remove(&body.tag.clone().into()); + + services().account_data.update_room( + &body.room_id, + sender_user, + &Raw::new(&tags_event) + .expect("json value serialization should always succeed"), + )?; + + Ok(Ra(delete_tag::v3::Response {})) } /// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags` @@ -98,29 +91,29 @@ pub async fn delete_tag_route( /// Returns tags on the room. /// /// - Gets the tag event of the room account data. 
-pub async fn get_tags_route(body: Ruma) -> Result { +pub(crate) async fn get_tags_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event = services().account_data.get( - Some(&body.room_id), - sender_user, - RoomAccountDataEventType::Tag, + let event = services() + .account_data + .get_room::(&body.room_id, sender_user)?; + + let tags_event = event.map_or_else( + || { + Ok(TagEventContent { + tags: BTreeMap::new(), + }) + }, + |e| { + e.deserialize().map_err(|_| { + Error::bad_database("Invalid account data event in db.") + }) + }, )?; - let tags_event = event - .map(|e| { - serde_json::from_str(e.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db.")) - }) - .unwrap_or_else(|| { - Ok(TagEvent { - content: TagEventContent { - tags: BTreeMap::new(), - }, - }) - })?; - - Ok(get_tags::v3::Response { - tags: tags_event.content.tags, - }) + Ok(Ra(get_tags::v3::Response { + tags: tags_event.tags, + })) } diff --git a/src/api/client_server/thirdparty.rs b/src/api/client_server/thirdparty.rs index c2c1adfd..a22fa548 100644 --- a/src/api/client_server/thirdparty.rs +++ b/src/api/client_server/thirdparty.rs @@ -1,16 +1,17 @@ -use crate::{Result, Ruma}; +use std::collections::BTreeMap; + use ruma::api::client::thirdparty::get_protocols; -use std::collections::BTreeMap; +use crate::{Ar, Ra, Result}; /// # `GET /_matrix/client/r0/thirdparty/protocols` /// /// TODO: Fetches all metadata about protocols supported by the homeserver. 
-pub async fn get_protocols_route( - _body: Ruma, -) -> Result { +pub(crate) async fn get_protocols_route( + _body: Ar, +) -> Result> { // TODO - Ok(get_protocols::v3::Response { + Ok(Ra(get_protocols::v3::Response { protocols: BTreeMap::new(), - }) + })) } diff --git a/src/api/client_server/threads.rs b/src/api/client_server/threads.rs index a095b420..cf96e234 100644 --- a/src/api/client_server/threads.rs +++ b/src/api/client_server/threads.rs @@ -1,19 +1,16 @@ use ruma::api::client::{error::ErrorKind, threads::get_threads}; -use crate::{services, Error, Result, Ruma}; +use crate::{services, Ar, Error, Ra, Result}; /// # `GET /_matrix/client/r0/rooms/{roomId}/threads` -pub async fn get_threads_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_threads_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // Use limit or else 10, with maximum 100 - let limit = body - .limit - .and_then(|l| l.try_into().ok()) - .unwrap_or(10) - .min(100); + let limit = + body.limit.and_then(|l| l.try_into().ok()).unwrap_or(10).min(100); let from = if let Some(from) = &body.from { from.parse() @@ -27,7 +24,7 @@ pub async fn get_threads_route( .threads .threads_until(sender_user, &body.room_id, from, &body.include)? 
.take(limit) - .filter_map(|r| r.ok()) + .filter_map(Result::ok) .filter(|(_, pdu)| { services() .rooms @@ -39,11 +36,11 @@ pub async fn get_threads_route( let next_batch = threads.last().map(|(count, _)| count.to_string()); - Ok(get_threads::v1::Response { + Ok(Ra(get_threads::v1::Response { chunk: threads .into_iter() .map(|(_, pdu)| pdu.to_room_event()) .collect(), next_batch, - }) + })) } diff --git a/src/api/client_server/to_device.rs b/src/api/client_server/to_device.rs index 31590fc7..663908b4 100644 --- a/src/api/client_server/to_device.rs +++ b/src/api/client_server/to_device.rs @@ -1,20 +1,22 @@ use std::collections::BTreeMap; -use crate::{services, Error, Result, Ruma}; use ruma::{ api::{ client::{error::ErrorKind, to_device::send_event_to_device}, federation::{self, transactions::edu::DirectDeviceContent}, }, + serde::Raw, to_device::DeviceIdOrAllDevices, }; +use crate::{services, Ar, Error, Ra, Result}; + /// # `PUT /_matrix/client/r0/sendToDevice/{eventType}/{txnId}` /// /// Send a to-device event to a set of client devices. -pub async fn send_event_to_device_route( - body: Ruma, -) -> Result { +pub(crate) async fn send_event_to_device_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); @@ -24,12 +26,13 @@ pub async fn send_event_to_device_route( .existing_txnid(sender_user, sender_device, &body.txn_id)? 
.is_some() { - return Ok(send_event_to_device::v3::Response {}); + return Ok(Ra(send_event_to_device::v3::Response {})); } for (target_user_id, map) in &body.messages { for (target_device_id_maybe, event) in map { - if target_user_id.server_name() != services().globals.server_name() { + if target_user_id.server_name() != services().globals.server_name() + { let mut map = BTreeMap::new(); map.insert(target_device_id_maybe.clone(), event.clone()); let mut messages = BTreeMap::new(); @@ -38,14 +41,16 @@ pub async fn send_event_to_device_route( services().sending.send_reliable_edu( target_user_id.server_name(), - serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice( - DirectDeviceContent { - sender: sender_user.clone(), - ev_type: body.event_type.clone(), - message_id: count.to_string().into(), - messages, - }, - )) + Raw::new( + &federation::transactions::edu::Edu::DirectToDevice( + DirectDeviceContent { + sender: sender_user.clone(), + ev_type: body.event_type.clone(), + message_id: count.to_string().into(), + messages, + }, + ), + ) .expect("DirectToDevice EDU can be serialized"), count, )?; @@ -61,20 +66,28 @@ pub async fn send_event_to_device_route( target_device_id, &body.event_type.to_string(), event.deserialize_as().map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") + Error::BadRequest( + ErrorKind::InvalidParam, + "Event is invalid", + ) })?, - )? 
+ )?; } DeviceIdOrAllDevices::AllDevices => { - for target_device_id in services().users.all_device_ids(target_user_id) { + for target_device_id in + services().users.all_device_ids(target_user_id) + { services().users.add_to_device_event( sender_user, target_user_id, &target_device_id?, &body.event_type.to_string(), event.deserialize_as().map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") + Error::BadRequest( + ErrorKind::InvalidParam, + "Event is invalid", + ) })?, )?; } @@ -84,9 +97,12 @@ pub async fn send_event_to_device_route( } // Save transaction id with empty data - services() - .transaction_ids - .add_txnid(sender_user, sender_device, &body.txn_id, &[])?; + services().transaction_ids.add_txnid( + sender_user, + sender_device, + &body.txn_id, + &[], + )?; - Ok(send_event_to_device::v3::Response {}) + Ok(Ra(send_event_to_device::v3::Response {})) } diff --git a/src/api/client_server/typing.rs b/src/api/client_server/typing.rs index e9e93708..2657b084 100644 --- a/src/api/client_server/typing.rs +++ b/src/api/client_server/typing.rs @@ -1,23 +1,20 @@ -use crate::{services, utils, Error, Result, Ruma}; use ruma::api::client::{error::ErrorKind, typing::create_typing_event}; +use crate::{services, utils, Ar, Error, Ra, Result}; + /// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` /// /// Sets the typing state of the sender user. -pub async fn create_typing_event_route( - body: Ruma, -) -> Result { +pub(crate) async fn create_typing_event_route( + body: Ar, +) -> Result> { use create_typing_event::v3::Typing; let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !services() - .rooms - .state_cache - .is_joined(sender_user, &body.room_id)? - { + if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? 
{ return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "You are not in this room.", )); } @@ -30,7 +27,8 @@ pub async fn create_typing_event_route( .typing_add( sender_user, &body.room_id, - duration.as_millis() as u64 + utils::millis_since_unix_epoch(), + duration.as_millis().try_into().unwrap_or(u64::MAX) + + utils::millis_since_unix_epoch(), ) .await?; } else { @@ -42,5 +40,5 @@ pub async fn create_typing_event_route( .await?; } - Ok(create_typing_event::v3::Response {}) + Ok(Ra(create_typing_event::v3::Response {})) } diff --git a/src/api/client_server/unversioned.rs b/src/api/client_server/unversioned.rs index 70e260ec..92ea88ed 100644 --- a/src/api/client_server/unversioned.rs +++ b/src/api/client_server/unversioned.rs @@ -1,23 +1,24 @@ use std::{collections::BTreeMap, iter::FromIterator}; -use axum::{response::IntoResponse, Json}; -use ruma::api::client::{discovery::get_supported_versions, error::ErrorKind}; +use ruma::api::client::discovery::get_supported_versions; -use crate::{services, Error, Result, Ruma}; +use crate::{Ar, Ra, Result}; /// # `GET /_matrix/client/versions` /// -/// Get the versions of the specification and unstable features supported by this server. +/// Get the versions of the specification and unstable features supported by +/// this server. /// /// - Versions take the form MAJOR.MINOR.PATCH /// - Only the latest PATCH release will be reported for each MAJOR.MINOR value -/// - Unstable features are namespaced and may include version information in their name +/// - Unstable features are namespaced and may include version information in +/// their name /// -/// Note: Unstable features are used while developing new features. Clients should avoid using -/// unstable features in their stable releases -pub async fn get_supported_versions_route( - _body: Ruma, -) -> Result { +/// Note: Unstable features are used while developing new features. 
Clients +/// should avoid using unstable features in their stable releases +pub(crate) async fn get_supported_versions_route( + _body: Ar, +) -> Result> { let resp = get_supported_versions::Response { versions: vec![ "r0.5.0".to_owned(), @@ -28,23 +29,11 @@ pub async fn get_supported_versions_route( "v1.4".to_owned(), "v1.5".to_owned(), ], - unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]), + unstable_features: BTreeMap::from_iter([ + ("org.matrix.e2e_cross_signing".to_owned(), true), + ("org.matrix.msc3916.stable".to_owned(), true), + ]), }; - Ok(resp) -} - -/// # `GET /.well-known/matrix/client` -pub async fn well_known_client_route( - _body: Ruma, -) -> Result { - let client_url = match services().globals.well_known_client() { - Some(url) => url.clone(), - None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")), - }; - - Ok(Json(serde_json::json!({ - "m.homeserver": {"base_url": client_url}, - "org.matrix.msc3575.proxy": {"url": client_url} - }))) + Ok(Ra(resp)) } diff --git a/src/api/client_server/user_directory.rs b/src/api/client_server/user_directory.rs index b4d11800..0d8912ce 100644 --- a/src/api/client_server/user_directory.rs +++ b/src/api/client_server/user_directory.rs @@ -1,4 +1,3 @@ -use crate::{services, Result, Ruma}; use ruma::{ api::client::user_directory::search_users, events::{ @@ -7,20 +6,21 @@ use ruma::{ }, }; +use crate::{services, Ar, Ra, Result}; + /// # `POST /_matrix/client/r0/user_directory/search` /// /// Searches all known users for a match. /// -/// - Hides any local users that aren't in any public rooms (i.e. those that have the join rule set to public) -/// and don't share a room with the sender -pub async fn search_users_route( - body: Ruma, -) -> Result { +/// - Hides any local users that aren't in any public rooms (i.e. 
those that +/// have the join rule set to public) and don't share a room with the sender +pub(crate) async fn search_users_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let limit = u64::from(body.limit) as usize; + let limit = body.limit.try_into().unwrap_or(usize::MAX); let mut users = services().users.iter().filter_map(|user_id| { - // Filter out buggy users (they should not exist, but you never know...) let user_id = user_id.ok()?; let user = search_users::v3::User { @@ -39,8 +39,7 @@ pub async fn search_users_route( .display_name .as_ref() .filter(|name| { - name.to_lowercase() - .contains(&body.search_term.to_lowercase()) + name.to_lowercase().contains(&body.search_term.to_lowercase()) }) .is_some(); @@ -55,18 +54,19 @@ pub async fn search_users_route( .rooms .state_cache .rooms_joined(&user_id) - .filter_map(|r| r.ok()) + .filter_map(Result::ok) .any(|room| { services() .rooms .state_accessor .room_state_get(&room, &StateEventType::RoomJoinRules, "") - .map_or(false, |event| { - event.map_or(false, |event| { - serde_json::from_str(event.content.get()) - .map_or(false, |r: RoomJoinRulesEventContent| { + .is_ok_and(|event| { + event.is_some_and(|event| { + serde_json::from_str(event.content.get()).is_ok_and( + |r: RoomJoinRulesEventContent| { r.join_rule == JoinRule::Public - }) + }, + ) }) }) }); @@ -97,5 +97,8 @@ pub async fn search_users_route( let results = users.by_ref().take(limit).collect(); let limited = users.next().is_some(); - Ok(search_users::v3::Response { results, limited }) + Ok(Ra(search_users::v3::Response { + results, + limited, + })) } diff --git a/src/api/client_server/voip.rs b/src/api/client_server/voip.rs index f0d91f71..f24ca81e 100644 --- a/src/api/client_server/voip.rs +++ b/src/api/client_server/voip.rs @@ -1,25 +1,33 @@ -use crate::{services, Result, Ruma}; +use std::time::{Duration, SystemTime}; + use base64::{engine::general_purpose, Engine as _}; use hmac::{Hmac, 
Mac}; use ruma::{api::client::voip::get_turn_server_info, SecondsSinceUnixEpoch}; use sha1::Sha1; -use std::time::{Duration, SystemTime}; + +use crate::{services, Ar, Ra, Result}; type HmacSha1 = Hmac; /// # `GET /_matrix/client/r0/voip/turnServer` /// /// TODO: Returns information about the recommended turn server. -pub async fn turn_server_route( - body: Ruma, -) -> Result { +pub(crate) async fn turn_server_route( + body: Ar, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let turn_secret = services().globals.turn_secret().clone(); - let (username, password) = if !turn_secret.is_empty() { + let (username, password) = if turn_secret.is_empty() { + ( + services().globals.turn_username().clone(), + services().globals.turn_password().clone(), + ) + } else { let expiry = SecondsSinceUnixEpoch::from_system_time( - SystemTime::now() + Duration::from_secs(services().globals.turn_ttl()), + SystemTime::now() + + Duration::from_secs(services().globals.turn_ttl()), ) .expect("time is valid"); @@ -29,20 +37,16 @@ pub async fn turn_server_route( .expect("HMAC can take key of any size"); mac.update(username.as_bytes()); - let password: String = general_purpose::STANDARD.encode(mac.finalize().into_bytes()); + let password: String = + general_purpose::STANDARD.encode(mac.finalize().into_bytes()); (username, password) - } else { - ( - services().globals.turn_username().clone(), - services().globals.turn_password().clone(), - ) }; - Ok(get_turn_server_info::v3::Response { + Ok(Ra(get_turn_server_info::v3::Response { username, password, uris: services().globals.turn_uris().to_vec(), ttl: Duration::from_secs(services().globals.turn_ttl()), - }) + })) } diff --git a/src/api/mod.rs b/src/api/mod.rs deleted file mode 100644 index 0d2cd664..00000000 --- a/src/api/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -pub mod appservice_server; -pub mod client_server; -pub mod ruma_wrapper; -pub mod server_server; diff --git a/src/api/ruma_wrapper.rs 
b/src/api/ruma_wrapper.rs new file mode 100644 index 00000000..91ea8d35 --- /dev/null +++ b/src/api/ruma_wrapper.rs @@ -0,0 +1,77 @@ +use std::ops::Deref; + +use ruma::{ + api::client::uiaa::UiaaResponse, CanonicalJsonValue, OwnedDeviceId, + OwnedServerName, OwnedUserId, +}; + +use crate::{service::appservice::RegistrationInfo, Error}; + +mod axum; + +/// A wrapper to convert an Axum request to Ruma data +/// +/// Named so because this converts from **A**xum to **R**uma. See also [`Ra`], +/// which is roughly the inverse of this type. +pub(crate) struct Ar { + /// The Ruma type to deserialize the body into + pub(crate) body: T, + pub(crate) sender_user: Option, + pub(crate) sender_device: Option, + pub(crate) sender_servername: Option, + // This is None when body is not a valid string + pub(crate) json_body: Option, + pub(crate) appservice_info: Option, +} + +impl Ar { + pub(crate) fn map_body(self, f: F) -> Ar + where + F: FnOnce(T) -> U, + { + let Ar { + body, + sender_user, + sender_device, + sender_servername, + json_body, + appservice_info, + } = self; + + Ar { + body: f(body), + sender_user, + sender_device, + sender_servername, + json_body, + appservice_info, + } + } +} + +impl Deref for Ar { + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.body + } +} + +/// A wrapper to convert Ruma data to an Axum response +/// +/// Named so because this converts from **R**uma to **A**xum. See also [`Ar`], +/// which is roughly the inverse of this type. 
+#[derive(Clone)] +pub(crate) struct Ra(pub(crate) T); + +impl From for Ra { + fn from(t: T) -> Self { + Self(t) + } +} + +impl From for Ra { + fn from(t: Error) -> Self { + t.to_response() + } +} diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index 649c1f54..01f80fb4 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -2,25 +2,31 @@ use std::{collections::BTreeMap, iter::FromIterator, str}; use axum::{ async_trait, - body::{Full, HttpBody}, - extract::{rejection::TypedHeaderRejectionReason, FromRequest, Path, TypedHeader}, - headers::{ - authorization::{Bearer, Credentials}, - Authorization, - }, + body::Body, + extract::{FromRequest, Path}, response::{IntoResponse, Response}, - BoxError, RequestExt, RequestPartsExt, + RequestExt, RequestPartsExt, }; -use bytes::{Buf, BufMut, Bytes, BytesMut}; +use axum_extra::{ + headers::{authorization::Bearer, Authorization}, + typed_header::TypedHeaderRejectionReason, + TypedHeader, +}; +use bytes::{BufMut, Bytes, BytesMut}; use http::{Request, StatusCode}; +use http_body_util::BodyExt; use ruma::{ - api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse}, - CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId, + api::{ + client::error::ErrorKind, federation::authentication::XMatrix, + AuthScheme, IncomingRequest, Metadata, OutgoingResponse, + }, + CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedDeviceId, + OwnedServerName, OwnedUserId, UserId, }; use serde::Deserialize; -use tracing::{debug, error, warn}; +use tracing::{error, warn}; -use super::{Ruma, RumaResponse}; +use super::{Ar, Ra}; use crate::{service::appservice::RegistrationInfo, services, Error, Result}; enum Token { @@ -30,421 +36,419 @@ enum Token { None, } -#[async_trait] -impl FromRequest for Ruma -where - T: IncomingRequest, - B: HttpBody + Send + 'static, - B::Data: Send, - B::Error: Into, -{ - type Rejection = Error; +/// Return value of 
[`ar_from_request_inner()`], used to construct an [`Ar`]. +struct ArPieces { + sender_user: Option, + sender_device: Option, + sender_servername: Option, + json_body: Option, + appservice_info: Option, + path_params: Path>, + http_request: Request, +} - async fn from_request(req: Request, _state: &S) -> Result { - #[derive(Deserialize)] - struct QueryParams { - access_token: Option, - user_id: Option, +/// Non-generic part of [`Ar::from_request()`]. Splitting this out reduces +/// binary size by ~10%. +#[allow(clippy::too_many_lines)] +async fn ar_from_request_inner( + req: axum::extract::Request, + metadata: Metadata, +) -> Result { + #[derive(Deserialize)] + struct QueryParams { + access_token: Option, + user_id: Option, + } + + let (mut parts, mut body) = { + let limited_req = req.with_limited_body(); + let (parts, body) = limited_req.into_parts(); + let body = body + .collect() + .await + .map_err(|_| { + Error::BadRequest(ErrorKind::MissingToken, "Missing token.") + })? + .to_bytes(); + (parts, body) + }; + + let auth_header: Option>> = + parts.extract().await?; + let path_params: Path> = parts.extract().await?; + + let query = parts.uri.query().unwrap_or_default(); + let query_params: QueryParams = match serde_html_form::from_str(query) { + Ok(params) => params, + Err(error) => { + error!(%error, %query, "Failed to deserialize query parameters"); + return Err(Error::BadRequest( + ErrorKind::Unknown, + "Failed to read query parameters", + )); } + }; - let (mut parts, mut body) = match req.with_limited_body() { - Ok(limited_req) => { - let (parts, body) = limited_req.into_parts(); - let body = to_bytes(body) - .await - .map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?; - (parts, body) - } - Err(original_req) => { - let (parts, body) = original_req.into_parts(); - let body = to_bytes(body) - .await - .map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?; - (parts, body) - } - }; + let token = match &auth_header { + 
Some(TypedHeader(Authorization(bearer))) => Some(bearer.token()), + None => query_params.access_token.as_deref(), + }; - let metadata = T::METADATA; - let auth_header: Option>> = parts.extract().await?; - let path_params: Path> = parts.extract().await?; + let token = if let Some(token) = token { + if let Some(reg_info) = + services().appservice.find_from_token(token).await + { + Token::Appservice(Box::new(reg_info.clone())) + } else if let Some((user_id, device_id)) = + services().users.find_from_token(token)? + { + Token::User((user_id, OwnedDeviceId::from(device_id))) + } else { + Token::Invalid + } + } else { + Token::None + }; - let query = parts.uri.query().unwrap_or_default(); - let query_params: QueryParams = match serde_html_form::from_str(query) { - Ok(params) => params, - Err(e) => { - error!(%query, "Failed to deserialize query parameters: {}", e); + let mut json_body = + serde_json::from_slice::(&body).ok(); + + let (sender_user, sender_device, sender_servername, appservice_info) = + match (metadata.authentication, token) { + (_, Token::Invalid) => { return Err(Error::BadRequest( - ErrorKind::Unknown, - "Failed to read query parameters", + ErrorKind::UnknownToken { + soft_logout: false, + }, + "Unknown access token.", + )) + } + (AuthScheme::AccessToken, Token::Appservice(info)) => { + let user_id = query_params + .user_id + .map_or_else( + || { + UserId::parse_with_server_name( + info.registration.sender_localpart.as_str(), + services().globals.server_name(), + ) + }, + UserId::parse, + ) + .map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidUsername, + "Username is invalid.", + ) + })?; + + if !info.is_user_match(&user_id) { + return Err(Error::BadRequest( + ErrorKind::Exclusive, + "User is not in namespace.", + )); + } + + if !services().users.exists(&user_id)? 
{ + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "User does not exist.", + )); + } + + (Some(user_id), None, None, Some(*info)) + } + ( + AuthScheme::None + | AuthScheme::AppserviceToken + | AuthScheme::AppserviceTokenOptional + | AuthScheme::AccessTokenOptional, + Token::Appservice(info), + ) => (None, None, None, Some(*info)), + (AuthScheme::AccessToken, Token::None) => { + return Err(Error::BadRequest( + ErrorKind::MissingToken, + "Missing access token.", + )); + } + ( + AuthScheme::AccessToken + | AuthScheme::AccessTokenOptional + | AuthScheme::None, + Token::User((user_id, device_id)), + ) => (Some(user_id), Some(device_id), None, None), + (AuthScheme::ServerSignatures, Token::None) => { + let TypedHeader(Authorization(x_matrix)) = parts + .extract::>>() + .await + .map_err(|error| { + warn!(%error, "Missing or invalid Authorization header"); + + let msg = match error.reason() { + TypedHeaderRejectionReason::Missing => { + "Missing Authorization header." + } + TypedHeaderRejectionReason::Error(_) => { + "Invalid X-Matrix signatures." + } + _ => "Unknown header-related error", + }; + + Error::BadRequest(ErrorKind::forbidden(), msg) + })?; + + if let Some(destination) = x_matrix.destination { + if destination != services().globals.server_name() { + warn!( + %destination, + "Incorrect destination in X-Matrix header" + ); + return Err(Error::BadRequest( + ErrorKind::Unauthorized, + "Incorrect destination in X-Matrix header", + )); + } + } + + let origin_signatures = BTreeMap::from_iter([( + x_matrix.key.to_string(), + CanonicalJsonValue::String(x_matrix.sig.to_string()), + )]); + + let signatures = BTreeMap::from_iter([( + x_matrix.origin.as_str().to_owned(), + CanonicalJsonValue::Object(origin_signatures), + )]); + + let x_matrix_uri = parts + .uri + .path_and_query() + .ok_or_else(|| { + Error::BadRequest( + ErrorKind::InvalidParam, + "No HTTP path/query", + ) + })? 
+ .to_string(); + let mut request_map = BTreeMap::from_iter([ + ( + "method".to_owned(), + CanonicalJsonValue::String(parts.method.to_string()), + ), + ( + "uri".to_owned(), + CanonicalJsonValue::String(x_matrix_uri), + ), + ( + "origin".to_owned(), + CanonicalJsonValue::String( + x_matrix.origin.as_str().to_owned(), + ), + ), + ( + "destination".to_owned(), + CanonicalJsonValue::String( + services() + .globals + .server_name() + .as_str() + .to_owned(), + ), + ), + ( + "signatures".to_owned(), + CanonicalJsonValue::Object(signatures), + ), + ]); + + if let Some(json_body) = &json_body { + request_map.insert("content".to_owned(), json_body.clone()); + } + + let keys_result = services() + .rooms + .event_handler + .fetch_signing_keys( + &x_matrix.origin, + vec![x_matrix.key.to_string()], + false, + ) + .await; + + let keys = match keys_result { + Ok(b) => b, + Err(error) => { + warn!(%error, "Failed to fetch signing keys"); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Failed to fetch signing keys.", + )); + } + }; + + // Only verify_keys that are currently valid should be used for + // validating requests as per MSC4029 + let pub_key_map = BTreeMap::from_iter([( + x_matrix.origin.as_str().to_owned(), + if keys.valid_until_ts > MilliSecondsSinceUnixEpoch::now() { + keys.verify_keys + .into_iter() + .map(|(id, key)| (id, key.key)) + .collect() + } else { + BTreeMap::new() + }, + )]); + + match ruma::signatures::verify_json(&pub_key_map, &request_map) + { + Ok(()) => (None, None, Some(x_matrix.origin), None), + Err(error) => { + warn!( + %error, + origin = %x_matrix.origin, + object = ?request_map, + "Failed to verify JSON request" + ); + + if parts.uri.to_string().contains('@') { + warn!( + "Request uri contained '@' character. 
Make \ + sure your reverse proxy gives Grapevine the \ + raw uri (apache: use nocanon)" + ); + } + + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Failed to verify X-Matrix signatures.", + )); + } + } + } + ( + AuthScheme::None + | AuthScheme::AppserviceToken + | AuthScheme::AppserviceTokenOptional + | AuthScheme::AccessTokenOptional, + Token::None, + ) => (None, None, None, None), + ( + AuthScheme::ServerSignatures, + Token::Appservice(_) | Token::User(_), + ) => { + return Err(Error::BadRequest( + ErrorKind::Unauthorized, + "Only server signatures should be used on this endpoint.", + )); + } + ( + AuthScheme::AppserviceToken + | AuthScheme::AppserviceTokenOptional, + Token::User(_), + ) => { + return Err(Error::BadRequest( + ErrorKind::Unauthorized, + "Only appservice access tokens should be used on this \ + endpoint.", )); } }; - let token = match &auth_header { - Some(TypedHeader(Authorization(bearer))) => Some(bearer.token()), - None => query_params.access_token.as_deref(), - }; + let mut http_request = + Request::builder().uri(parts.uri).method(parts.method); + *http_request.headers_mut().unwrap() = parts.headers; - let token = if let Some(token) = token { - if let Some(reg_info) = services().appservice.find_from_token(token).await { - Token::Appservice(Box::new(reg_info.clone())) - } else if let Some((user_id, device_id)) = services().users.find_from_token(token)? 
{ - Token::User((user_id, OwnedDeviceId::from(device_id))) - } else { - Token::Invalid - } - } else { - Token::None - }; + if let Some(CanonicalJsonValue::Object(json_body)) = &mut json_body { + let user_id = sender_user.clone().unwrap_or_else(|| { + UserId::parse_with_server_name("", services().globals.server_name()) + .expect("we know this is valid") + }); - let mut json_body = serde_json::from_slice::(&body).ok(); - - let (sender_user, sender_device, sender_servername, appservice_info) = - match (metadata.authentication, token) { - (_, Token::Invalid) => { - return Err(Error::BadRequest( - ErrorKind::UnknownToken { soft_logout: false }, - "Unknown access token.", - )) - } - (AuthScheme::AccessToken, Token::Appservice(info)) => { - let user_id = query_params - .user_id - .map_or_else( - || { - UserId::parse_with_server_name( - info.registration.sender_localpart.as_str(), - services().globals.server_name(), - ) - }, - UserId::parse, - ) - .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") - })?; - - if !info.is_user_match(&user_id) { - return Err(Error::BadRequest( - ErrorKind::Exclusive, - "User is not in namespace.", - )); - } - - if !services().users.exists(&user_id)? 
{ - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "User does not exist.", - )); - } - - (Some(user_id), None, None, Some(*info)) - } - ( - AuthScheme::None - | AuthScheme::AppserviceToken - | AuthScheme::AccessTokenOptional, - Token::Appservice(info), - ) => (None, None, None, Some(*info)), - (AuthScheme::AccessToken, Token::None) => { - return Err(Error::BadRequest( - ErrorKind::MissingToken, - "Missing access token.", - )); - } - ( - AuthScheme::AccessToken | AuthScheme::AccessTokenOptional | AuthScheme::None, - Token::User((user_id, device_id)), - ) => (Some(user_id), Some(device_id), None, None), - (AuthScheme::ServerSignatures, Token::None) => { - let TypedHeader(Authorization(x_matrix)) = parts - .extract::>>() - .await - .map_err(|e| { - warn!("Missing or invalid Authorization header: {}", e); - - let msg = match e.reason() { - TypedHeaderRejectionReason::Missing => { - "Missing Authorization header." - } - TypedHeaderRejectionReason::Error(_) => { - "Invalid X-Matrix signatures." 
- } - _ => "Unknown header-related error", - }; - - Error::BadRequest(ErrorKind::Forbidden, msg) - })?; - - let origin_signatures = BTreeMap::from_iter([( - x_matrix.key.clone(), - CanonicalJsonValue::String(x_matrix.sig), - )]); - - let signatures = BTreeMap::from_iter([( - x_matrix.origin.as_str().to_owned(), - CanonicalJsonValue::Object(origin_signatures), - )]); - - let mut request_map = BTreeMap::from_iter([ - ( - "method".to_owned(), - CanonicalJsonValue::String(parts.method.to_string()), - ), - ( - "uri".to_owned(), - CanonicalJsonValue::String(parts.uri.to_string()), - ), - ( - "origin".to_owned(), - CanonicalJsonValue::String(x_matrix.origin.as_str().to_owned()), - ), - ( - "destination".to_owned(), - CanonicalJsonValue::String( - services().globals.server_name().as_str().to_owned(), - ), - ), - ( - "signatures".to_owned(), - CanonicalJsonValue::Object(signatures), - ), - ]); - - if let Some(json_body) = &json_body { - request_map.insert("content".to_owned(), json_body.clone()); - }; - - let keys_result = services() - .rooms - .event_handler - .fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_owned()]) - .await; - - let keys = match keys_result { - Ok(b) => b, - Err(e) => { - warn!("Failed to fetch signing keys: {}", e); - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Failed to fetch signing keys.", - )); - } - }; - - let pub_key_map = - BTreeMap::from_iter([(x_matrix.origin.as_str().to_owned(), keys)]); - - match ruma::signatures::verify_json(&pub_key_map, &request_map) { - Ok(()) => (None, None, Some(x_matrix.origin), None), - Err(e) => { - warn!( - "Failed to verify json request from {}: {}\n{:?}", - x_matrix.origin, e, request_map - ); - - if parts.uri.to_string().contains('@') { - warn!( - "Request uri contained '@' character. 
Make sure your \ - reverse proxy gives Conduit the raw uri (apache: use \ - nocanon)" - ); - } - - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Failed to verify X-Matrix signatures.", - )); - } - } - } - ( - AuthScheme::None - | AuthScheme::AppserviceToken - | AuthScheme::AccessTokenOptional, - Token::None, - ) => (None, None, None, None), - (AuthScheme::ServerSignatures, Token::Appservice(_) | Token::User(_)) => { - return Err(Error::BadRequest( - ErrorKind::Unauthorized, - "Only server signatures should be used on this endpoint.", - )); - } - (AuthScheme::AppserviceToken, Token::User(_)) => { - return Err(Error::BadRequest( - ErrorKind::Unauthorized, - "Only appservice access tokens should be used on this endpoint.", - )); - } - }; - - let mut http_request = http::Request::builder().uri(parts.uri).method(parts.method); - *http_request.headers_mut().unwrap() = parts.headers; - - if let Some(CanonicalJsonValue::Object(json_body)) = &mut json_body { - let user_id = sender_user.clone().unwrap_or_else(|| { - UserId::parse_with_server_name("", services().globals.server_name()) - .expect("we know this is valid") + let uiaa_request = json_body + .get("auth") + .and_then(|auth| auth.as_object()) + .and_then(|auth| auth.get("session")) + .and_then(|session| session.as_str()) + .and_then(|session| { + services().uiaa.get_uiaa_request( + &user_id, + &sender_device.clone().unwrap_or_else(|| "".into()), + session, + ) }); - let uiaa_request = json_body - .get("auth") - .and_then(|auth| auth.as_object()) - .and_then(|auth| auth.get("session")) - .and_then(|session| session.as_str()) - .and_then(|session| { - services().uiaa.get_uiaa_request( - &user_id, - &sender_device.clone().unwrap_or_else(|| "".into()), - session, + if let Some(CanonicalJsonValue::Object(initial_request)) = uiaa_request + { + for (key, value) in initial_request { + json_body.entry(key).or_insert(value); + } + } + + let mut buf = BytesMut::new().writer(); + serde_json::to_writer(&mut buf, 
json_body) + .expect("value serialization can't fail"); + body = buf.into_inner().freeze(); + } + let http_request = http_request.body(body).unwrap(); + + Ok(ArPieces { + sender_user, + sender_device, + sender_servername, + json_body, + appservice_info, + path_params, + http_request, + }) +} + +#[async_trait] +impl FromRequest for Ar +where + T: IncomingRequest, + S: Sync, +{ + type Rejection = Error; + + #[tracing::instrument("ar_from_request", skip_all)] + async fn from_request( + req: axum::extract::Request, + _state: &S, + ) -> Result { + let pieces = ar_from_request_inner(req, T::METADATA).await?; + + let body = + T::try_from_http_request(pieces.http_request, &pieces.path_params) + .map_err(|error| { + warn!( + %error, + body = ?pieces.json_body, + "Request body JSON structure is incorrect" + ); + Error::BadRequest( + ErrorKind::BadJson, + "Failed to deserialize request.", ) - }); + })?; - if let Some(CanonicalJsonValue::Object(initial_request)) = uiaa_request { - for (key, value) in initial_request { - json_body.entry(key).or_insert(value); - } - } - - let mut buf = BytesMut::new().writer(); - serde_json::to_writer(&mut buf, json_body).expect("value serialization can't fail"); - body = buf.into_inner().freeze(); - } - - let http_request = http_request.body(&*body).unwrap(); - - debug!("{:?}", http_request); - - let body = T::try_from_http_request(http_request, &path_params).map_err(|e| { - warn!("try_from_http_request failed: {:?}", e); - debug!("JSON body: {:?}", json_body); - Error::BadRequest(ErrorKind::BadJson, "Failed to deserialize request.") - })?; - - Ok(Ruma { + Ok(Ar { body, - sender_user, - sender_device, - sender_servername, - appservice_info, - json_body, + sender_user: pieces.sender_user, + sender_device: pieces.sender_device, + sender_servername: pieces.sender_servername, + json_body: pieces.json_body, + appservice_info: pieces.appservice_info, }) } } - -struct XMatrix { - origin: OwnedServerName, - key: String, // KeyName? 
- sig: String, -} - -impl Credentials for XMatrix { - const SCHEME: &'static str = "X-Matrix"; - - fn decode(value: &http::HeaderValue) -> Option { - debug_assert!( - value.as_bytes().starts_with(b"X-Matrix "), - "HeaderValue to decode should start with \"X-Matrix ..\", received = {value:?}", - ); - - let parameters = str::from_utf8(&value.as_bytes()["X-Matrix ".len()..]) - .ok()? - .trim_start(); - - let mut origin = None; - let mut key = None; - let mut sig = None; - - for entry in parameters.split_terminator(',') { - let (name, value) = entry.split_once('=')?; - - // It's not at all clear why some fields are quoted and others not in the spec, - // let's simply accept either form for every field. - let value = value - .strip_prefix('"') - .and_then(|rest| rest.strip_suffix('"')) - .unwrap_or(value); - - // FIXME: Catch multiple fields of the same name - match name { - "origin" => origin = Some(value.try_into().ok()?), - "key" => key = Some(value.to_owned()), - "sig" => sig = Some(value.to_owned()), - _ => debug!( - "Unexpected field `{}` in X-Matrix Authorization header", - name - ), - } - } - - Some(Self { - origin: origin?, - key: key?, - sig: sig?, - }) - } - - fn encode(&self) -> http::HeaderValue { - todo!() - } -} - -impl IntoResponse for RumaResponse { +impl IntoResponse for Ra { fn into_response(self) -> Response { match self.0.try_into_http_response::() { - Ok(res) => res.map(BytesMut::freeze).map(Full::new).into_response(), + Ok(res) => { + res.map(BytesMut::freeze).map(Body::from).into_response() + } Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(), } } } - -// copied from hyper under the following license: -// Copyright (c) 2014-2021 Sean McArthur - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, 
distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: - -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. - -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. -pub(crate) async fn to_bytes(body: T) -> Result -where - T: HttpBody, -{ - futures_util::pin_mut!(body); - - // If there's only 1 chunk, we can just return Buf::to_bytes() - let mut first = if let Some(buf) = body.data().await { - buf? - } else { - return Ok(Bytes::new()); - }; - - let second = if let Some(buf) = body.data().await { - buf? - } else { - return Ok(first.copy_to_bytes(first.remaining())); - }; - - // With more than 1 buf, we gotta flatten into a Vec first. 
- let cap = first.remaining() + second.remaining() + body.size_hint().lower() as usize; - let mut vec = Vec::with_capacity(cap); - vec.put(first); - vec.put(second); - - while let Some(buf) = body.data().await { - vec.put(buf?); - } - - Ok(vec.into()) -} diff --git a/src/api/ruma_wrapper/mod.rs b/src/api/ruma_wrapper/mod.rs deleted file mode 100644 index 862da1dc..00000000 --- a/src/api/ruma_wrapper/mod.rs +++ /dev/null @@ -1,43 +0,0 @@ -use crate::{service::appservice::RegistrationInfo, Error}; -use ruma::{ - api::client::uiaa::UiaaResponse, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, - OwnedUserId, -}; -use std::ops::Deref; - -#[cfg(feature = "conduit_bin")] -mod axum; - -/// Extractor for Ruma request structs -pub struct Ruma { - pub body: T, - pub sender_user: Option, - pub sender_device: Option, - pub sender_servername: Option, - // This is None when body is not a valid string - pub json_body: Option, - pub appservice_info: Option, -} - -impl Deref for Ruma { - type Target = T; - - fn deref(&self) -> &Self::Target { - &self.body - } -} - -#[derive(Clone)] -pub struct RumaResponse(pub T); - -impl From for RumaResponse { - fn from(t: T) -> Self { - Self(t) - } -} - -impl From for RumaResponse { - fn from(t: Error) -> Self { - t.to_response() - } -} diff --git a/src/api/server_server.rs b/src/api/server_server.rs index b25b1313..39b208f0 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1,34 +1,48 @@ -#![allow(deprecated)] - -use crate::{ - api::client_server::{self, claim_keys_helper, get_keys_helper}, - service::pdu::{gen_event_id_canonical_json, PduBuilder}, - services, utils, Error, PduEvent, Result, Ruma, +use std::{ + collections::BTreeMap, + fmt::Debug, + mem, + net::{IpAddr, SocketAddr}, + sync::Arc, + time::Instant, }; -use axum::{response::IntoResponse, Json}; -use get_profile_information::v1::ProfileField; -use http::header::{HeaderValue, AUTHORIZATION}; +use axum::{response::IntoResponse, Json}; +use 
axum_extra::headers::{Authorization, HeaderMapExt}; +use base64::Engine as _; +use get_profile_information::v1::ProfileField; use ruma::{ api::{ client::error::{Error as RumaError, ErrorKind}, federation::{ + authenticated_media, + authentication::XMatrix, authorization::get_event_authorization, backfill::get_backfill, device::get_devices::{self, v1::UserDevice}, directory::{get_public_rooms, get_public_rooms_filtered}, - discovery::{get_server_keys, get_server_version, ServerSigningKeys, VerifyKey}, - event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, + discovery::{ + get_server_keys, get_server_version, ServerSigningKeys, + }, + event::{ + get_event, get_missing_events, get_room_state, + get_room_state_ids, + }, keys::{claim_keys, get_keys}, - membership::{create_invite, create_join_event, prepare_join_event}, + membership::{ + create_invite, create_join_event, prepare_join_event, + }, query::{get_profile_information, get_room_information}, transactions::{ - edu::{DeviceListUpdateContent, DirectDeviceContent, Edu, SigningKeyUpdateContent}, + edu::{ + DeviceListUpdateContent, DirectDeviceContent, Edu, + SigningKeyUpdateContent, + }, send_transaction_message, }, }, - EndpointError, IncomingResponse, MatrixVersion, OutgoingRequest, OutgoingResponse, - SendAccessToken, + EndpointError, IncomingResponse, MatrixVersion, OutgoingRequest, + OutgoingResponse, SendAccessToken, }, directory::{Filter, RoomNetwork}, events::{ @@ -40,33 +54,40 @@ use ruma::{ StateEventType, TimelineEventType, }, serde::{Base64, JsonObject, Raw}, + state_res::Event, to_device::DeviceIdOrAllDevices, - uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, - OwnedEventId, OwnedRoomId, OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, RoomId, - ServerName, + uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, + MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, OwnedServerName, + OwnedServerSigningKeyId, 
OwnedSigningKeyId, OwnedUserId, RoomId, + ServerName, Signatures, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; -use std::{ - collections::BTreeMap, - fmt::Debug, - mem, - net::{IpAddr, SocketAddr}, - sync::Arc, - time::{Duration, Instant, SystemTime}, -}; use tokio::sync::RwLock; +use tracing::{debug, error, field, trace, trace_span, warn}; -use tracing::{debug, error, warn}; +use super::appservice_server; +use crate::{ + api::client_server::{self, claim_keys_helper, get_keys_helper}, + observability::{FoundIn, Lookup, METRICS}, + service::{ + globals::SigningKeys, + pdu::{gen_event_id_canonical_json, PduBuilder}, + rooms::state::ExtractVersion, + }, + services, + utils::{self, dbg_truncate_str, MxcData}, + Ar, Error, PduEvent, Ra, Result, +}; /// Wraps either an literal IP address plus port, or a hostname plus complement /// (colon-plus-port if it was specified). /// -/// Note: A `FedDest::Named` might contain an IP address in string form if there -/// was no port specified to construct a SocketAddr with. +/// Note: A [`FedDest::Named`] might contain an IP address in string form if +/// there was no port specified to construct a [`SocketAddr`] with. 
/// /// # Examples: /// ```rust -/// # use conduit::api::server_server::FedDest; +/// # use grapevine::api::server_server::FedDest; /// # fn main() -> Result<(), std::net::AddrParseError> { /// FedDest::Literal("198.51.100.3:8448".parse()?); /// FedDest::Literal("[2001:db8::4:5]:443".parse()?); @@ -77,23 +98,23 @@ use tracing::{debug, error, warn}; /// # } /// ``` #[derive(Clone, Debug, PartialEq, Eq)] -pub enum FedDest { +pub(crate) enum FedDest { Literal(SocketAddr), Named(String, String), } impl FedDest { - fn into_https_string(self) -> String { + fn to_https_string(&self) -> String { match self { Self::Literal(addr) => format!("https://{addr}"), Self::Named(host, port) => format!("https://{host}{port}"), } } - fn into_uri_string(self) -> String { + fn to_uri_string(&self) -> String { match self { Self::Literal(addr) => addr.to_string(), - Self::Named(host, ref port) => host + port, + Self::Named(host, port) => format!("{host}{port}"), } } @@ -107,30 +128,49 @@ impl FedDest { fn port(&self) -> Option { match &self { Self::Literal(addr) => Some(addr.port()), - Self::Named(_, port) => port[1..].parse().ok(), + Self::Named(_, port) => { + port.strip_prefix(':').and_then(|x| x.parse().ok()) + } } } } -#[tracing::instrument(skip(request))] -pub(crate) async fn send_request( +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum LogRequestError { + Yes, + No, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum AllowLoopbackRequests { + Yes, + No, +} + +#[allow(clippy::too_many_lines)] +#[tracing::instrument(skip(request, log_error, allow_loopback), fields(url))] +pub(crate) async fn send_request( destination: &ServerName, request: T, + log_error: LogRequestError, + allow_loopback: AllowLoopbackRequests, ) -> Result where - T: Debug, + T: OutgoingRequest + Debug, { if !services().globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); + return Err(Error::BadConfig("Federation is disabled.")); } - if destination 
== services().globals.server_name() { + if destination == services().globals.server_name() + && allow_loopback == AllowLoopbackRequests::No + { return Err(Error::bad_config( "Won't send federation request to ourselves", )); } - debug!("Preparing to send request to {destination}"); + debug!("Preparing to send request"); let mut write_destination_to_cache = false; @@ -143,27 +183,29 @@ where .cloned(); let (actual_destination, host) = if let Some(result) = cached_result { + METRICS.record_lookup(Lookup::FederationDestination, FoundIn::Cache); result } else { write_destination_to_cache = true; let result = find_actual_destination(destination).await; - (result.0, result.1.into_uri_string()) + (result.0, result.1.to_uri_string()) }; - let actual_destination_str = actual_destination.clone().into_https_string(); + let actual_destination_str = actual_destination.to_https_string(); let mut http_request = request .try_into_http_request::>( &actual_destination_str, SendAccessToken::IfRequired(""), - &[MatrixVersion::V1_4], + &[MatrixVersion::V1_11], ) - .map_err(|e| { + .map_err(|error| { warn!( - "Failed to find destination {}: {}", - actual_destination_str, e + %error, + actual_destination = actual_destination_str, + "Failed to find destination", ); Error::BadServerResponse("Invalid destination") })?; @@ -176,9 +218,10 @@ where serde_json::from_slice(http_request.body()) .expect("body is valid json, we just created it"), ); - }; + } - request_map.insert("method".to_owned(), T::METADATA.method.to_string().into()); + request_map + .insert("method".to_owned(), T::METADATA.method.to_string().into()); request_map.insert( "uri".to_owned(), http_request @@ -194,8 +237,8 @@ where ); request_map.insert("destination".to_owned(), destination.as_str().into()); - let mut request_json = - serde_json::from_value(request_map.into()).expect("valid JSON is valid BTreeMap"); + let mut request_json = serde_json::from_value(request_map.into()) + .expect("valid JSON is valid BTreeMap"); 
ruma::signatures::sign_json( services().globals.server_name().as_str(), @@ -205,121 +248,108 @@ where .expect("our request json is what ruma expects"); let request_json: serde_json::Map = - serde_json::from_slice(&serde_json::to_vec(&request_json).unwrap()).unwrap(); + serde_json::from_slice(&serde_json::to_vec(&request_json).unwrap()) + .unwrap(); - let signatures = request_json["signatures"] + // There's exactly the one signature we just created, fish it back out again + let (key_id, signature) = request_json["signatures"] + .get(services().globals.server_name().as_str()) + .unwrap() .as_object() .unwrap() - .values() - .map(|v| { - v.as_object() - .unwrap() - .iter() - .map(|(k, v)| (k, v.as_str().unwrap())) - }); + .iter() + .next() + .unwrap(); - for signature_server in signatures { - for s in signature_server { - http_request.headers_mut().insert( - AUTHORIZATION, - HeaderValue::from_str(&format!( - "X-Matrix origin={},key=\"{}\",sig=\"{}\"", - services().globals.server_name(), - s.0, - s.1 - )) - .unwrap(), - ); - } - } + let key_id = OwnedSigningKeyId::try_from(key_id.clone()).unwrap(); + let signature = Base64::parse(signature.as_str().unwrap()) + .expect("generated signature should be valid base64"); + http_request.headers_mut().typed_insert(Authorization(XMatrix::new( + services().globals.server_name().to_owned(), + destination.to_owned(), + key_id, + signature, + ))); + + // can be enabled selectively using `filter = + // grapevine[outgoing_request_curl]=trace` in config + trace_span!("outgoing_request_curl").in_scope(|| { + trace!( + cmd = utils::curlify(&http_request), + "curl command line for outgoing request" + ); + }); let reqwest_request = reqwest::Request::try_from(http_request)?; let url = reqwest_request.url().clone(); + tracing::Span::current().record("url", field::display(url)); - debug!("Sending request to {destination} at {url}"); - let response = services() - .globals - .federation_client() - .execute(reqwest_request) - .await; - 
debug!("Received response from {destination} at {url}"); + debug!("Sending request"); + let response = + services().globals.federation_client().execute(reqwest_request).await; - match response { - Ok(mut response) => { - // reqwest::Response -> http::Response conversion - let status = response.status(); - let mut http_response_builder = http::Response::builder() - .status(status) - .version(response.version()); - mem::swap( - response.headers_mut(), - http_response_builder - .headers_mut() - .expect("http::response::Builder is usable"), - ); - - debug!("Getting response bytes from {destination}"); - let body = response.bytes().await.unwrap_or_else(|e| { - warn!("server error {}", e); - Vec::new().into() - }); // TODO: handle timeout - debug!("Got response bytes from {destination}"); - - if status != 200 { - warn!( - "{} {}: {}", - url, - status, - String::from_utf8_lossy(&body) - .lines() - .collect::>() - .join(" ") - ); - } - - let http_response = http_response_builder - .body(body) - .expect("reqwest body is valid http body"); - - if status == 200 { - debug!("Parsing response bytes from {destination}"); - let response = T::IncomingResponse::try_from_http_response(http_response); - if response.is_ok() && write_destination_to_cache { - services() - .globals - .actual_destination_cache - .write() - .await - .insert( - OwnedServerName::from(destination), - (actual_destination, host), - ); - } - - response.map_err(|e| { - warn!( - "Invalid 200 response from {} on: {} {}", - &destination, url, e - ); - Error::BadServerResponse("Server returned bad 200 response.") - }) - } else { - debug!("Returning error from {destination}"); - Err(Error::FederationError( - destination.to_owned(), - RumaError::from_http_response(http_response), - )) - } - } - Err(e) => { - warn!( - "Could not send request to {} at {}: {}", - destination, actual_destination_str, e - ); - Err(e.into()) + let mut response = response.inspect_err(|error| { + if log_error == LogRequestError::Yes { + 
warn!(%error, "Could not send request"); } + })?; + + // reqwest::Response -> http::Response conversion + let status = response.status(); + debug!(status = u16::from(status), "Received response"); + let mut http_response_builder = + http::Response::builder().status(status).version(response.version()); + mem::swap( + response.headers_mut(), + http_response_builder + .headers_mut() + .expect("http::response::Builder is usable"), + ); + + debug!("Getting response bytes"); + // TODO: handle timeout + let body = response.bytes().await.unwrap_or_else(|error| { + warn!(%error, "Server error"); + Vec::new().into() + }); + debug!("Got response bytes"); + + if status != 200 { + warn!( + status = u16::from(status), + response = + dbg_truncate_str(String::from_utf8_lossy(&body).as_ref(), 100) + .into_owned(), + "Received error over federation", + ); } + + let http_response = http_response_builder + .body(body) + .expect("reqwest body is valid http body"); + + if status != 200 { + return Err(Error::Federation( + destination.to_owned(), + RumaError::from_http_response(http_response), + )); + } + + debug!("Parsing response bytes"); + let response = T::IncomingResponse::try_from_http_response(http_response); + if response.is_ok() && write_destination_to_cache { + METRICS.record_lookup(Lookup::FederationDestination, FoundIn::Remote); + services().globals.actual_destination_cache.write().await.insert( + OwnedServerName::from(destination), + (actual_destination, host), + ); + } + + response.map_err(|e| { + warn!(error = %e, "Invalid 200 response"); + Error::BadServerResponse("Server returned bad 200 response.") + }) } fn get_ip_with_port(destination_str: &str) -> Option { @@ -340,11 +370,16 @@ fn add_port_to_hostname(destination_str: &str) -> FedDest { FedDest::Named(host.to_owned(), port.to_owned()) } -/// Returns: actual_destination, host header +/// Returns: `actual_destination`, `Host` header /// Implemented according to the specification at -/// Numbers in comments below refer 
to bullet points in linked section of specification -async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDest) { - debug!("Finding actual destination for {destination}"); +/// Numbers in comments below refer to bullet points in linked section of +/// specification +#[allow(clippy::too_many_lines)] +#[tracing::instrument(ret(level = "debug"))] +async fn find_actual_destination( + destination: &'_ ServerName, +) -> (FedDest, FedDest) { + debug!("Finding actual destination"); let destination_str = destination.as_str().to_owned(); let mut hostname = destination_str.clone(); let actual_destination = match get_ip_with_port(&destination_str) { @@ -358,107 +393,116 @@ async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDe let (host, port) = destination_str.split_at(pos); FedDest::Named(host.to_owned(), port.to_owned()) } else { - debug!("Requesting well known for {destination}"); - match request_well_known(destination.as_str()).await { - Some(delegated_hostname) => { - debug!("3: A .well-known file is available"); - hostname = add_port_to_hostname(&delegated_hostname).into_uri_string(); - match get_ip_with_port(&delegated_hostname) { - Some(host_and_port) => host_and_port, // 3.1: IP literal in .well-known file - None => { - if let Some(pos) = delegated_hostname.find(':') { - debug!("3.2: Hostname with port in .well-known file"); - let (host, port) = delegated_hostname.split_at(pos); - FedDest::Named(host.to_owned(), port.to_owned()) - } else { - debug!("Delegated hostname has no port in this branch"); - if let Some(hostname_override) = - query_srv_record(&delegated_hostname).await - { - debug!("3.3: SRV lookup successful"); - let force_port = hostname_override.port(); + debug!(%destination, "Requesting well known"); + if let Some(delegated_hostname) = + request_well_known(destination.as_str()).await + { + debug!("3: A .well-known file is available"); + hostname = add_port_to_hostname(&delegated_hostname) + 
.to_uri_string(); + if let Some(host_and_port) = + get_ip_with_port(&delegated_hostname) + { + host_and_port + } else if let Some(pos) = delegated_hostname.find(':') { + debug!("3.2: Hostname with port in .well-known file"); + let (host, port) = delegated_hostname.split_at(pos); + FedDest::Named(host.to_owned(), port.to_owned()) + } else { + debug!("Delegated hostname has no port in this branch"); + if let Some(hostname_override) = + query_srv_record(&delegated_hostname).await + { + debug!("3.3: SRV lookup successful"); + let force_port = hostname_override.port(); - if let Ok(override_ip) = services() - .globals - .dns_resolver() - .lookup_ip(hostname_override.hostname()) - .await - { - services() - .globals - .tls_name_override - .write() - .unwrap() - .insert( - delegated_hostname.clone(), - ( - override_ip.iter().collect(), - force_port.unwrap_or(8448), - ), - ); - } else { - warn!("Using SRV record, but could not resolve to IP"); - } - - if let Some(port) = force_port { - FedDest::Named(delegated_hostname, format!(":{port}")) - } else { - add_port_to_hostname(&delegated_hostname) - } - } else { - debug!("3.4: No SRV records, just use the hostname from .well-known"); - add_port_to_hostname(&delegated_hostname) - } - } + if let Ok(override_ip) = services() + .globals + .dns_resolver() + .lookup_ip(hostname_override.hostname()) + .await + { + services() + .globals + .tls_name_override + .write() + .unwrap() + .insert( + delegated_hostname.clone(), + ( + override_ip.iter().collect(), + force_port.unwrap_or(8448), + ), + ); + } else { + warn!( + "Using SRV record, but could not resolve \ + to IP" + ); } + + if let Some(port) = force_port { + FedDest::Named( + delegated_hostname, + format!(":{port}"), + ) + } else { + add_port_to_hostname(&delegated_hostname) + } + } else { + debug!( + "3.4: No SRV records, just use the hostname \ + from .well-known" + ); + add_port_to_hostname(&delegated_hostname) } } - None => { - debug!("4: No .well-known or an error occured"); - 
match query_srv_record(&destination_str).await { - Some(hostname_override) => { - debug!("4: SRV record found"); - let force_port = hostname_override.port(); + } else { + debug!("4: No .well-known or an error occured"); + if let Some(hostname_override) = + query_srv_record(&destination_str).await + { + debug!("4: SRV record found"); + let force_port = hostname_override.port(); - if let Ok(override_ip) = services() - .globals - .dns_resolver() - .lookup_ip(hostname_override.hostname()) - .await - { - services() - .globals - .tls_name_override - .write() - .unwrap() - .insert( - hostname.clone(), - ( - override_ip.iter().collect(), - force_port.unwrap_or(8448), - ), - ); - } else { - warn!("Using SRV record, but could not resolve to IP"); - } - - if let Some(port) = force_port { - FedDest::Named(hostname.clone(), format!(":{port}")) - } else { - add_port_to_hostname(&hostname) - } - } - None => { - debug!("5: No SRV record found"); - add_port_to_hostname(&destination_str) - } + if let Ok(override_ip) = services() + .globals + .dns_resolver() + .lookup_ip(hostname_override.hostname()) + .await + { + services() + .globals + .tls_name_override + .write() + .unwrap() + .insert( + hostname.clone(), + ( + override_ip.iter().collect(), + force_port.unwrap_or(8448), + ), + ); + } else { + warn!( + "Using SRV record, but could not resolve to IP" + ); } + + if let Some(port) = force_port { + FedDest::Named(hostname.clone(), format!(":{port}")) + } else { + add_port_to_hostname(&hostname) + } + } else { + debug!("5: No SRV record found"); + add_port_to_hostname(&destination_str) } } } } }; - debug!("Actual destination: {actual_destination:?}"); + debug!(?actual_destination, "Resolved actual destination"); // Can't use get_ip_with_port here because we don't want to add a port // to an IP address if it wasn't specified @@ -475,6 +519,7 @@ async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDe (actual_destination, hostname) } 
+#[tracing::instrument(ret(level = "debug"))] async fn query_given_srv_record(record: &str) -> Option { services() .globals @@ -484,7 +529,11 @@ async fn query_given_srv_record(record: &str) -> Option { .map(|srv| { srv.iter().next().map(|result| { FedDest::Named( - result.target().to_string().trim_end_matches('.').to_owned(), + result + .target() + .to_string() + .trim_end_matches('.') + .to_owned(), format!(":{}", result.port()), ) }) @@ -492,10 +541,12 @@ async fn query_given_srv_record(record: &str) -> Option { .unwrap_or(None) } +#[tracing::instrument(ret(level = "debug"))] async fn query_srv_record(hostname: &'_ str) -> Option { let hostname = hostname.trim_end_matches('.'); - if let Some(host_port) = query_given_srv_record(&format!("_matrix-fed._tcp.{hostname}.")).await + if let Some(host_port) = + query_given_srv_record(&format!("_matrix-fed._tcp.{hostname}.")).await { Some(host_port) } else { @@ -503,16 +554,17 @@ async fn query_srv_record(hostname: &'_ str) -> Option { } } +#[tracing::instrument(ret(level = "debug"))] async fn request_well_known(destination: &str) -> Option { let response = services() .globals .default_client() - .get(&format!("https://{destination}/.well-known/matrix/server")) + .get(format!("https://{destination}/.well-known/matrix/server")) .send() .await; debug!("Got well known response"); - if let Err(e) = &response { - debug!("Well known error: {e:?}"); + if let Err(error) = &response { + debug!(%error, "Failed to request .well-known"); return None; } let text = response.ok()?.text().await; @@ -524,45 +576,51 @@ async fn request_well_known(destination: &str) -> Option { /// # `GET /_matrix/federation/v1/version` /// /// Get version information on this server. 
-pub async fn get_server_version_route( - _body: Ruma, -) -> Result { - Ok(get_server_version::v1::Response { +pub(crate) async fn get_server_version_route( + _body: Ar, +) -> Result> { + Ok(Ra(get_server_version::v1::Response { server: Some(get_server_version::v1::Server { - name: Some("Conduit".to_owned()), - version: Some(env!("CARGO_PKG_VERSION").to_owned()), + name: Some(env!("CARGO_PKG_NAME").to_owned()), + version: Some(crate::version()), }), - }) + })) } /// # `GET /_matrix/key/v2/server` /// /// Gets the public signing keys of this server. /// -/// - Matrix does not support invalidating public keys, so the key returned by this will be valid -/// forever. -// Response type for this endpoint is Json because we need to calculate a signature for the response -pub async fn get_server_keys_route() -> Result { - let mut verify_keys: BTreeMap = BTreeMap::new(); - verify_keys.insert( - format!("ed25519:{}", services().globals.keypair().version()) - .try_into() - .expect("found invalid server signing keys in DB"), - VerifyKey { - key: Base64::new(services().globals.keypair().public_key().to_vec()), - }, - ); +/// - Matrix does not support invalidating public keys, so the key returned by +/// this will be valid forever. 
+// Response type for this endpoint is Json because we need to calculate a +// signature for the response +pub(crate) async fn get_server_keys_route() -> Result { + fn convert_key_ids( + keys: BTreeMap, + ) -> BTreeMap { + keys.into_iter() + .map(|(id, key)| { + let id = id + .try_into() + .expect("found invalid server signing keys in DB"); + (id, key) + }) + .collect() + } + + let keys = SigningKeys::load_own_keys(); + let verify_keys = convert_key_ids(keys.verify_keys); + let old_verify_keys = convert_key_ids(keys.old_verify_keys); + let mut response = serde_json::from_slice( get_server_keys::v2::Response { server_key: Raw::new(&ServerSigningKeys { server_name: services().globals.server_name().to_owned(), verify_keys, - old_verify_keys: BTreeMap::new(), - signatures: BTreeMap::new(), - valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time( - SystemTime::now() + Duration::from_secs(86400 * 7), - ) - .expect("time is valid"), + old_verify_keys, + signatures: Signatures::new(), + valid_until_ts: keys.valid_until_ts, }) .expect("static conversion, no errors"), } @@ -586,18 +644,18 @@ pub async fn get_server_keys_route() -> Result { /// /// Gets the public signing keys of this server. /// -/// - Matrix does not support invalidating public keys, so the key returned by this will be valid -/// forever. -pub async fn get_server_keys_deprecated_route() -> impl IntoResponse { +/// - Matrix does not support invalidating public keys, so the key returned by +/// this will be valid forever. +pub(crate) async fn get_server_keys_deprecated_route() -> impl IntoResponse { get_server_keys_route().await } /// # `POST /_matrix/federation/v1/publicRooms` /// /// Lists the public rooms on this server. 
-pub async fn get_public_rooms_filtered_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_public_rooms_filtered_route( + body: Ar, +) -> Result> { let response = client_server::get_public_rooms_filtered_helper( None, body.limit, @@ -607,20 +665,20 @@ pub async fn get_public_rooms_filtered_route( ) .await?; - Ok(get_public_rooms_filtered::v1::Response { + Ok(Ra(get_public_rooms_filtered::v1::Response { chunk: response.chunk, prev_batch: response.prev_batch, next_batch: response.next_batch, total_room_count_estimate: response.total_room_count_estimate, - }) + })) } /// # `GET /_matrix/federation/v1/publicRooms` /// /// Lists the public rooms on this server. -pub async fn get_public_rooms_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_public_rooms_route( + body: Ar, +) -> Result> { let response = client_server::get_public_rooms_filtered_helper( None, body.limit, @@ -630,21 +688,23 @@ pub async fn get_public_rooms_route( ) .await?; - Ok(get_public_rooms::v1::Response { + Ok(Ra(get_public_rooms::v1::Response { chunk: response.chunk, prev_batch: response.prev_batch, next_batch: response.next_batch, total_room_count_estimate: response.total_room_count_estimate, - }) + })) } -pub fn parse_incoming_pdu( +#[tracing::instrument(skip(pdu))] +pub(crate) fn parse_incoming_pdu( pdu: &RawJsonValue, ) -> Result<(OwnedEventId, CanonicalJsonObject, OwnedRoomId)> { - let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { - warn!("Error parsing incoming event {:?}: {:?}", pdu, e); - Error::BadServerResponse("Invalid PDU in server response") - })?; + let value: CanonicalJsonObject = + serde_json::from_str(pdu.get()).map_err(|error| { + warn!(%error, object = ?pdu, "Error parsing incoming event"); + Error::BadServerResponse("Invalid PDU in server response") + })?; let room_id: OwnedRoomId = value .get("room_id") @@ -654,17 +714,22 @@ pub fn parse_incoming_pdu( "Invalid room id in pdu", ))?; - let room_version_id = 
services().rooms.state.get_room_version(&room_id)?; + let room_version_id = services() + .rooms + .state + .get_create_content::(&room_id)?; + let Some(room_version_rules) = room_version_id.rules() else { + return Err(Error::UnsupportedRoomVersion(room_version_id)); + }; - let (event_id, value) = match gen_event_id_canonical_json(pdu, &room_version_id) { - Ok(t) => t, - Err(_) => { - // Event could not be converted to canonical json - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not convert event to canonical json.", - )); - } + let Ok((event_id, value)) = + gen_event_id_canonical_json(pdu, &room_version_rules) + else { + // Event could not be converted to canonical json + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not convert event to canonical json.", + )); }; Ok((event_id, value, room_id)) } @@ -672,31 +737,23 @@ pub fn parse_incoming_pdu( /// # `PUT /_matrix/federation/v1/send/{txnId}` /// /// Push EDUs and PDUs to this server. -pub async fn send_transaction_message_route( - body: Ruma, -) -> Result { - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); +#[allow(clippy::too_many_lines)] +pub(crate) async fn send_transaction_message_route( + body: Ar, +) -> Result> { + let sender_servername = + body.sender_servername.as_ref().expect("server is authenticated"); let mut resolved_map = BTreeMap::new(); let pub_key_map = RwLock::new(BTreeMap::new()); - // This is all the auth_events that have been recursively fetched so they don't have to be - // deserialized over and over again. - // TODO: make this persist across requests but not in a DB Tree (in globals?) - // TODO: This could potentially also be some sort of trie (suffix tree) like structure so - // that once an auth event is known it would know (using indexes maybe) all of the auth - // events that it references. 
- // let mut auth_cache = EventMap::new(); - for pdu in &body.pdus { - let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { - warn!("Error parsing incoming event {:?}: {:?}", pdu, e); - Error::BadServerResponse("Invalid PDU in server response") - })?; + let value: CanonicalJsonObject = serde_json::from_str(pdu.get()) + .map_err(|error| { + warn!(%error, object = ?pdu, "Error parsing incoming event"); + Error::BadServerResponse("Invalid PDU in server response") + })?; let room_id: OwnedRoomId = value .get("room_id") .and_then(|id| RoomId::parse(id.as_str()?).ok()) @@ -705,32 +762,32 @@ pub async fn send_transaction_message_route( "Invalid room id in pdu", ))?; - if services().rooms.state.get_room_version(&room_id).is_err() { - debug!("Server is not in room {room_id}"); + if services() + .rooms + .state + .get_create_content::(&room_id) + .is_err() + { + debug!(%room_id, "This server is not in the room"); continue; } let r = parse_incoming_pdu(pdu); let (event_id, value, room_id) = match r { Ok(t) => t, - Err(e) => { - warn!("Could not parse PDU: {e}"); - warn!("Full PDU: {:?}", &pdu); + Err(error) => { + warn!(%error, object = ?pdu, "Error parsing incoming event"); continue; } }; - // We do not add the event_id field to the pdu here because of signature and hashes checks + // We do not add the event_id field to the pdu here because of signature + // and hashes checks - let mutex = Arc::clone( - services() - .globals - .roomid_mutex_federation - .write() - .await - .entry(room_id.to_owned()) - .or_default(), - ); - let mutex_lock = mutex.lock().await; + let federation_token = services() + .globals + .roomid_mutex_federation + .lock_key(room_id.clone()) + .await; let start_time = Instant::now(); resolved_map.insert( event_id.clone(), @@ -748,21 +805,19 @@ pub async fn send_transaction_message_route( .await .map(|_| ()), ); - drop(mutex_lock); + drop(federation_token); - let elapsed = start_time.elapsed(); debug!( - "Handling transaction of 
event {} took {}m{}s", - event_id, - elapsed.as_secs() / 60, - elapsed.as_secs() % 60 + %event_id, + elapsed = ?start_time.elapsed(), + "Finished handling event", ); } for pdu in &resolved_map { - if let Err(e) = pdu.1 { - if matches!(e, Error::BadRequest(ErrorKind::NotFound, _)) { - warn!("Incoming PDU failed {:?}", pdu); + if let (event_id, Err(error)) = pdu { + if matches!(error, Error::BadRequest(ErrorKind::NotFound, _)) { + warn!(%error, %event_id, "Incoming PDU failed"); } } } @@ -773,10 +828,18 @@ pub async fn send_transaction_message_route( .filter_map(|edu| serde_json::from_str::(edu.json().get()).ok()) { match edu { - Edu::Presence(_) => {} Edu::Receipt(receipt) => { for (room_id, room_updates) in receipt.receipts { for (user_id, user_updates) in room_updates.read { + if user_id.server_name() != sender_servername { + warn!( + %user_id, + %sender_servername, + "Got receipt EDU from incorrect homeserver, \ + ignoring", + ); + continue; + } if let Some((event_id, _)) = user_updates .event_ids .iter() @@ -792,13 +855,15 @@ pub async fn send_transaction_message_route( .max_by_key(|(_, count)| *count) { let mut user_receipts = BTreeMap::new(); - user_receipts.insert(user_id.clone(), user_updates.data); + user_receipts + .insert(user_id.clone(), user_updates.data); let mut receipts = BTreeMap::new(); receipts.insert(ReceiptType::Read, user_receipts); let mut receipt_content = BTreeMap::new(); - receipt_content.insert(event_id.to_owned(), receipts); + receipt_content + .insert(event_id.to_owned(), receipts); let event = ReceiptEvent { content: ReceiptEventContent(receipt_content), @@ -808,15 +873,28 @@ pub async fn send_transaction_message_route( .rooms .edus .read_receipt - .readreceipt_update(&user_id, &room_id, event)?; + .readreceipt_update( + &user_id, &room_id, event, + )?; } else { // TODO fetch missing events - debug!("No known event ids in read receipt: {:?}", user_updates); + debug!( + ?user_updates, + "No known event ids in read receipt", + ); } } } } 
Edu::Typing(typing) => { + if typing.user_id.server_name() != sender_servername { + warn!( + user_id = %typing.user_id, + %sender_servername, + "Got typing EDU from incorrect homeserver, ignoring", + ); + continue; + } if services() .rooms .state_cache @@ -843,7 +921,19 @@ pub async fn send_transaction_message_route( } } } - Edu::DeviceListUpdate(DeviceListUpdateContent { user_id, .. }) => { + Edu::DeviceListUpdate(DeviceListUpdateContent { + user_id, + .. + }) => { + if user_id.server_name() != sender_servername { + warn!( + %user_id, + %sender_servername, + "Got device list update EDU from incorrect homeserver, \ + ignoring", + ); + continue; + } services().users.mark_device_key_update(&user_id)?; } Edu::DirectToDevice(DirectDeviceContent { @@ -852,60 +942,79 @@ pub async fn send_transaction_message_route( message_id, messages, }) => { + if sender.server_name() != sender_servername { + warn!( + user_id = %sender, + %sender_servername, + "Got direct-to-device EDU from incorrect homeserver, \ + ignoring", + ); + continue; + } // Check if this is a new transaction id if services() .transaction_ids .existing_txnid(&sender, None, &message_id)? - .is_some() + .is_none() { - continue; - } - - for (target_user_id, map) in &messages { - for (target_device_id_maybe, event) in map { - match target_device_id_maybe { - DeviceIdOrAllDevices::DeviceId(target_device_id) => { - services().users.add_to_device_event( + for (target_user_id, map) in &messages { + for (target_device_id_maybe, event) in map { + match target_device_id_maybe { + DeviceIdOrAllDevices::DeviceId( + target_device_id, + ) => services().users.add_to_device_event( &sender, target_user_id, target_device_id, &ev_type.to_string(), - event.deserialize_as().map_err(|e| { - warn!("To-Device event is invalid: {event:?} {e}"); - Error::BadRequest( - ErrorKind::InvalidParam, - "Event is invalid", - ) - })?, - )? 
- } - - DeviceIdOrAllDevices::AllDevices => { - for target_device_id in - services().users.all_device_ids(target_user_id) - { - services().users.add_to_device_event( - &sender, - target_user_id, - &target_device_id?, - &ev_type.to_string(), - event.deserialize_as().map_err(|_| { + event.deserialize_as().map_err( + |error| { + warn!( + %error, + object = ?event.json(), + "To-Device event is invalid", + ); Error::BadRequest( ErrorKind::InvalidParam, "Event is invalid", ) - })?, - )?; + }, + )?, + )?, + + DeviceIdOrAllDevices::AllDevices => { + for target_device_id in services() + .users + .all_device_ids(target_user_id) + { + services().users.add_to_device_event( + &sender, + target_user_id, + &target_device_id?, + &ev_type.to_string(), + event.deserialize_as().map_err( + |_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Event is invalid", + ) + }, + )?, + )?; + } } } } } - } - // Save transaction id with empty data - services() - .transaction_ids - .add_txnid(&sender, None, &message_id, &[])?; + // Save transaction id with empty data + services().transaction_ids.add_txnid( + &sender, + None, + &message_id, + &[], + )?; + } } Edu::SigningKeyUpdate(SigningKeyUpdateContent { user_id, @@ -913,59 +1022,64 @@ pub async fn send_transaction_message_route( self_signing_key, }) => { if user_id.server_name() != sender_servername { + warn!( + %user_id, + %sender_servername, + "Got signing key update from incorrect homeserver, \ + ignoring", + ); continue; } if let Some(master_key) = master_key { services().users.add_cross_signing_keys( &user_id, &master_key, - &self_signing_key, - &None, + self_signing_key.as_ref(), + None, true, )?; } } - Edu::_Custom(_) => {} + Edu::_Custom(_) | Edu::Presence(_) => {} } } - Ok(send_transaction_message::v1::Response { + Ok(Ra(send_transaction_message::v1::Response { pdus: resolved_map .into_iter() .map(|(e, r)| (e, r.map_err(|e| e.sanitized_error()))) .collect(), - }) + })) } /// # `GET /_matrix/federation/v1/event/{eventId}` /// /// 
Retrieves a single event from the server. /// -/// - Only works if a user of this server is currently invited or joined the room -pub async fn get_event_route( - body: Ruma, -) -> Result { - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); +/// - Only works if a user of this server is currently invited or joined the +/// room +pub(crate) async fn get_event_route( + body: Ar, +) -> Result> { + let sender_servername = + body.sender_servername.as_ref().expect("server is authenticated"); - let event = services() - .rooms - .timeline - .get_pdu_json(&body.event_id)? - .ok_or_else(|| { - warn!("Event not found, event ID: {:?}", &body.event_id); - Error::BadRequest(ErrorKind::NotFound, "Event not found.") - })?; + let event = + services().rooms.timeline.get_pdu_json(&body.event_id)?.ok_or_else( + || { + warn!(event_id = %body.event_id, "Event not found"); + Error::BadRequest(ErrorKind::NotFound, "Event not found.") + }, + )?; let room_id_str = event .get("room_id") .and_then(|val| val.as_str()) .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - let room_id = <&RoomId>::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + let room_id = <&RoomId>::try_from(room_id_str).map_err(|_| { + Error::bad_database("Invalid room id field in event in database") + })?; if !services() .rooms @@ -973,7 +1087,7 @@ pub async fn get_event_route( .server_in_room(sender_servername, room_id)? { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Server is not in room", )); } @@ -984,31 +1098,29 @@ pub async fn get_event_route( &body.event_id, )? 
{ return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Server is not allowed to see event.", )); } - Ok(get_event::v1::Response { + Ok(Ra(get_event::v1::Response { origin: services().globals.server_name().to_owned(), origin_server_ts: MilliSecondsSinceUnixEpoch::now(), pdu: PduEvent::convert_to_outgoing_federation_event(event), - }) + })) } /// # `GET /_matrix/federation/v1/backfill/` /// /// Retrieves events from before the sender joined the room, if the room's /// history visibility allows. -pub async fn get_backfill_route( - body: Ruma, -) -> Result { - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); +pub(crate) async fn get_backfill_route( + body: Ar, +) -> Result> { + let sender_servername = + body.sender_servername.as_ref().expect("server is authenticated"); - debug!("Got backfill request from: {}", sender_servername); + debug!(server = %sender_servername, "Got backfill request"); if !services() .rooms @@ -1016,7 +1128,7 @@ pub async fn get_backfill_route( .server_in_room(sender_servername, &body.room_id)? { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Server is not in room.", )); } @@ -1042,11 +1154,11 @@ pub async fn get_backfill_route( let all_events = services() .rooms .timeline - .pdus_until(user_id!("@doesntmatter:conduit.rs"), &body.room_id, until)? + .pdus_until(user_id!("@doesntmatter:grapevine"), &body.room_id, until)? 
.take(limit.try_into().unwrap()); let events = all_events - .filter_map(|r| r.ok()) + .filter_map(Result::ok) .filter(|(_, e)| { matches!( services().rooms.state_accessor.server_can_see_event( @@ -1062,23 +1174,21 @@ pub async fn get_backfill_route( .map(PduEvent::convert_to_outgoing_federation_event) .collect(); - Ok(get_backfill::v1::Response { + Ok(Ra(get_backfill::v1::Response { origin: services().globals.server_name().to_owned(), origin_server_ts: MilliSecondsSinceUnixEpoch::now(), pdus: events, - }) + })) } /// # `POST /_matrix/federation/v1/get_missing_events/{roomId}` /// /// Retrieves events that the sender is missing. -pub async fn get_missing_events_route( - body: Ruma, -) -> Result { - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); +pub(crate) async fn get_missing_events_route( + body: Ar, +) -> Result> { + let sender_servername = + body.sender_servername.as_ref().expect("server is authenticated"); if !services() .rooms @@ -1086,7 +1196,7 @@ pub async fn get_missing_events_route( .server_in_room(sender_servername, &body.room_id)? { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Server is not in room", )); } @@ -1100,20 +1210,30 @@ pub async fn get_missing_events_route( let mut events = Vec::new(); let mut i = 0; - while i < queued_events.len() && events.len() < u64::from(body.limit) as usize { - if let Some(pdu) = services().rooms.timeline.get_pdu_json(&queued_events[i])? { - let room_id_str = pdu - .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + while i < queued_events.len() + && events.len() < body.limit.try_into().unwrap_or(usize::MAX) + { + if let Some(pdu) = + services().rooms.timeline.get_pdu_json(&queued_events[i])? 
+ { + let room_id_str = + pdu.get("room_id").and_then(|val| val.as_str()).ok_or_else( + || Error::bad_database("Invalid event in database"), + )?; - let event_room_id = <&RoomId>::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + let event_room_id = + <&RoomId>::try_from(room_id_str).map_err(|_| { + Error::bad_database( + "Invalid room id field in event in database", + ) + })?; if event_room_id != body.room_id { warn!( - "Evil event detected: Event {} found while searching in room {}", - queued_events[i], body.room_id + event_id = %queued_events[i], + expected_room_id = %body.room_id, + actual_room_id = %event_room_id, + "Evil event detected" ); return Err(Error::BadRequest( ErrorKind::InvalidParam, @@ -1137,19 +1257,29 @@ pub async fn get_missing_events_route( queued_events.extend_from_slice( &serde_json::from_value::>( - serde_json::to_value(pdu.get("prev_events").cloned().ok_or_else(|| { - Error::bad_database("Event in db has no prev_events field.") - })?) + serde_json::to_value( + pdu.get("prev_events").cloned().ok_or_else(|| { + Error::bad_database( + "Event in db has no prev_events field.", + ) + })?, + ) .expect("canonical json is valid json value"), ) - .map_err(|_| Error::bad_database("Invalid prev_events content in pdu in db."))?, + .map_err(|_| { + Error::bad_database( + "Invalid prev_events content in pdu in db.", + ) + })?, ); events.push(PduEvent::convert_to_outgoing_federation_event(pdu)); } i += 1; } - Ok(get_missing_events::v1::Response { events }) + Ok(Ra(get_missing_events::v1::Response { + events, + })) } /// # `GET /_matrix/federation/v1/event_auth/{roomId}/{eventId}` @@ -1157,13 +1287,11 @@ pub async fn get_missing_events_route( /// Retrieves the auth chain for a given event. 
/// /// - This does not include the event itself -pub async fn get_event_authorization_route( - body: Ruma, -) -> Result { - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); +pub(crate) async fn get_event_authorization_route( + body: Ar, +) -> Result> { + let sender_servername = + body.sender_servername.as_ref().expect("server is authenticated"); if !services() .rooms @@ -1171,7 +1299,7 @@ pub async fn get_event_authorization_route( .server_in_room(sender_servername, &body.room_id)? { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Server is not in room.", )); } @@ -1181,22 +1309,22 @@ pub async fn get_event_authorization_route( .event_handler .acl_check(sender_servername, &body.room_id)?; - let event = services() - .rooms - .timeline - .get_pdu_json(&body.event_id)? - .ok_or_else(|| { - warn!("Event not found, event ID: {:?}", &body.event_id); - Error::BadRequest(ErrorKind::NotFound, "Event not found.") - })?; + let event = + services().rooms.timeline.get_pdu_json(&body.event_id)?.ok_or_else( + || { + warn!(event_id = %body.event_id, "Event not found"); + Error::BadRequest(ErrorKind::NotFound, "Event not found.") + }, + )?; let room_id_str = event .get("room_id") .and_then(|val| val.as_str()) .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - let room_id = <&RoomId>::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + let room_id = <&RoomId>::try_from(room_id_str).map_err(|_| { + Error::bad_database("Invalid room id field in event in database") + })?; let auth_chain_ids = services() .rooms @@ -1204,24 +1332,24 @@ pub async fn get_event_authorization_route( .get_auth_chain(room_id, vec![Arc::from(&*body.event_id)]) .await?; - Ok(get_event_authorization::v1::Response { + Ok(Ra(get_event_authorization::v1::Response { auth_chain: auth_chain_ids - .filter_map(|id| services().rooms.timeline.get_pdu_json(&id).ok()?) 
+ .filter_map(|id| { + services().rooms.timeline.get_pdu_json(&id).ok()? + }) .map(PduEvent::convert_to_outgoing_federation_event) .collect(), - }) + })) } /// # `GET /_matrix/federation/v1/state/{roomId}` /// /// Retrieves the current state of the room. -pub async fn get_room_state_route( - body: Ruma, -) -> Result { - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); +pub(crate) async fn get_room_state_route( + body: Ar, +) -> Result> { + let sender_servername = + body.sender_servername.as_ref().expect("server is authenticated"); if !services() .rooms @@ -1229,7 +1357,7 @@ pub async fn get_room_state_route( .server_in_room(sender_servername, &body.room_id)? { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Server is not in room.", )); } @@ -1256,12 +1384,7 @@ pub async fn get_room_state_route( .into_values() .map(|id| { PduEvent::convert_to_outgoing_federation_event( - services() - .rooms - .timeline - .get_pdu_json(&id) - .unwrap() - .unwrap(), + services().rooms.timeline.get_pdu_json(&id).unwrap().unwrap(), ) }) .collect(); @@ -1272,32 +1395,31 @@ pub async fn get_room_state_route( .get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)]) .await?; - Ok(get_room_state::v1::Response { + Ok(Ra(get_room_state::v1::Response { auth_chain: auth_chain_ids - .filter_map( - |id| match services().rooms.timeline.get_pdu_json(&id).ok()? { - Some(json) => Some(PduEvent::convert_to_outgoing_federation_event(json)), - None => { - error!("Could not find event json for {id} in db."); - None - } - }, - ) + .filter_map(|event_id| { + if let Some(json) = + services().rooms.timeline.get_pdu_json(&event_id).ok()? + { + Some(PduEvent::convert_to_outgoing_federation_event(json)) + } else { + error!(%event_id, "Could not find event JSON for event"); + None + } + }) .collect(), pdus, - }) + })) } /// # `GET /_matrix/federation/v1/state_ids/{roomId}` /// /// Retrieves the current state of the room. 
-pub async fn get_room_state_ids_route( - body: Ruma, -) -> Result { - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); +pub(crate) async fn get_room_state_ids_route( + body: Ar, +) -> Result> { + let sender_servername = + body.sender_servername.as_ref().expect("server is authenticated"); if !services() .rooms @@ -1305,7 +1427,7 @@ pub async fn get_room_state_ids_route( .server_in_room(sender_servername, &body.room_id)? { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Server is not in room.", )); } @@ -1339,18 +1461,18 @@ pub async fn get_room_state_ids_route( .get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)]) .await?; - Ok(get_room_state_ids::v1::Response { + Ok(Ra(get_room_state_ids::v1::Response { auth_chain_ids: auth_chain_ids.map(|id| (*id).to_owned()).collect(), pdu_ids, - }) + })) } /// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}` /// /// Creates a join template. -pub async fn create_join_event_template_route( - body: Ruma, -) -> Result { +pub(crate) async fn create_join_event_template_route( + body: Ar, +) -> Result> { if !services().rooms.metadata.exists(&body.room_id)? 
{ return Err(Error::BadRequest( ErrorKind::NotFound, @@ -1358,43 +1480,40 @@ pub async fn create_join_event_template_route( )); } - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); + let sender_servername = + body.sender_servername.as_ref().expect("server is authenticated"); services() .rooms .event_handler .acl_check(sender_servername, &body.room_id)?; - let mutex_state = Arc::clone( - services() - .globals - .roomid_mutex_state - .write() - .await - .entry(body.room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; + let room_token = services() + .globals + .roomid_mutex_state + .lock_key(body.room_id.clone()) + .await; - // TODO: Conduit does not implement restricted join rules yet, we always reject + // TODO: Grapevine does not implement restricted join rules yet, we always + // reject let join_rules_event = services().rooms.state_accessor.room_state_get( &body.room_id, &StateEventType::RoomJoinRules, "", )?; - let join_rules_event_content: Option = join_rules_event - .as_ref() - .map(|join_rules_event| { - serde_json::from_str(join_rules_event.content.get()).map_err(|e| { - warn!("Invalid join rules event: {}", e); - Error::bad_database("Invalid join rules event in db.") + let join_rules_event_content: Option = + join_rules_event + .as_ref() + .map(|join_rules_event| { + serde_json::from_str(join_rules_event.content.get()).map_err( + |error| { + warn!(%error, "Invalid join rules event"); + Error::bad_database("Invalid join rules event in db.") + }, + ) }) - }) - .transpose()?; + .transpose()?; if let Some(join_rules_event_content) = join_rules_event_content { if matches!( @@ -1403,12 +1522,15 @@ pub async fn create_join_event_template_route( ) { return Err(Error::BadRequest( ErrorKind::UnableToAuthorizeJoin, - "Conduit does not support restricted rooms yet.", + "Grapevine does not support restricted rooms yet.", )); } } - let room_version_id = 
services().rooms.state.get_room_version(&body.room_id)?; + let room_version_id = services() + .rooms + .state + .get_create_content::(&body.room_id)?; if !body.ver.contains(&room_version_id) { return Err(Error::BadRequest( ErrorKind::IncompatibleRoomVersion { @@ -1430,34 +1552,36 @@ pub async fn create_join_event_template_route( }) .expect("member event is valid value"); - let (_pdu, mut pdu_json) = services().rooms.timeline.create_hash_and_sign_event( - PduBuilder { - event_type: TimelineEventType::RoomMember, - content, - unsigned: None, - state_key: Some(body.user_id.to_string()), - redacts: None, - }, - &body.user_id, - &body.room_id, - &state_lock, - )?; + let (_pdu, mut pdu_json) = + services().rooms.timeline.create_hash_and_sign_event( + PduBuilder { + event_type: TimelineEventType::RoomMember, + content, + unsigned: None, + state_key: Some(body.user_id.to_string()), + redacts: None, + }, + &body.user_id, + &room_token, + )?; - drop(state_lock); + drop(room_token); pdu_json.remove("event_id"); - Ok(prepare_join_event::v1::Response { + Ok(Ra(prepare_join_event::v1::Response { room_version: Some(room_version_id), - event: to_raw_value(&pdu_json).expect("CanonicalJson can be serialized to JSON"), - }) + event: to_raw_value(&pdu_json) + .expect("CanonicalJson can be serialized to JSON"), + })) } +#[allow(clippy::too_many_lines)] async fn create_join_event( sender_servername: &ServerName, room_id: &RoomId, pdu: &RawJsonValue, -) -> Result { +) -> Result { if !services().rooms.metadata.exists(room_id)? 
{ return Err(Error::BadRequest( ErrorKind::NotFound, @@ -1465,27 +1589,28 @@ async fn create_join_event( )); } - services() - .rooms - .event_handler - .acl_check(sender_servername, room_id)?; + services().rooms.event_handler.acl_check(sender_servername, room_id)?; - // TODO: Conduit does not implement restricted join rules yet, we always reject + // TODO: Grapevine does not implement restricted join rules yet, we always + // reject let join_rules_event = services().rooms.state_accessor.room_state_get( room_id, &StateEventType::RoomJoinRules, "", )?; - let join_rules_event_content: Option = join_rules_event - .as_ref() - .map(|join_rules_event| { - serde_json::from_str(join_rules_event.content.get()).map_err(|e| { - warn!("Invalid join rules event: {}", e); - Error::bad_database("Invalid join rules event in db.") + let join_rules_event_content: Option = + join_rules_event + .as_ref() + .map(|join_rules_event| { + serde_json::from_str(join_rules_event.content.get()).map_err( + |error| { + warn!(%error, "Invalid join rules event"); + Error::bad_database("Invalid join rules event in db.") + }, + ) }) - }) - .transpose()?; + .transpose()?; if let Some(join_rules_event_content) = join_rules_event_content { if matches!( @@ -1494,35 +1619,36 @@ async fn create_join_event( ) { return Err(Error::BadRequest( ErrorKind::UnableToAuthorizeJoin, - "Conduit does not support restricted rooms yet.", + "Grapevine does not support restricted rooms yet.", )); } } - // We need to return the state prior to joining, let's keep a reference to that here - let shortstatehash = services() - .rooms - .state - .get_room_shortstatehash(room_id)? 
- .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Pdu state not found.", - ))?; + // We need to return the state prior to joining, let's keep a reference to + // that here + let shortstatehash = + services().rooms.state.get_room_shortstatehash(room_id)?.ok_or( + Error::BadRequest(ErrorKind::NotFound, "Pdu state not found."), + )?; let pub_key_map = RwLock::new(BTreeMap::new()); - // let mut auth_cache = EventMap::new(); - // We do not add the event_id field to the pdu here because of signature and hashes checks - let room_version_id = services().rooms.state.get_room_version(room_id)?; - let (event_id, value) = match gen_event_id_canonical_json(pdu, &room_version_id) { - Ok(t) => t, - Err(_) => { - // Event could not be converted to canonical json - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not convert event to canonical json.", - )); - } + // We do not add the event_id field to the pdu here because of signature and + // hashes checks + let room_version_id = + services().rooms.state.get_create_content::(room_id)?; + let Some(room_version_rules) = room_version_id.rules() else { + return Err(Error::UnsupportedRoomVersion(room_version_id)); + }; + + let Ok((event_id, value)) = + gen_event_id_canonical_json(pdu, &room_version_rules) + else { + // Event could not be converted to canonical json + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not convert event to canonical json.", + )); }; let origin: OwnedServerName = serde_json::from_value( @@ -1532,34 +1658,35 @@ async fn create_join_event( ))?) 
.expect("CanonicalJson is valid json value"), ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; + .map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid.") + })?; - let mutex = Arc::clone( - services() - .globals - .roomid_mutex_federation - .write() - .await - .entry(room_id.to_owned()) - .or_default(), - ); - let mutex_lock = mutex.lock().await; - let pdu_id: Vec = services() + let federation_token = services() + .globals + .roomid_mutex_federation + .lock_key(room_id.to_owned()) + .await; + let pdu_id = services() .rooms .event_handler - .handle_incoming_pdu(&origin, &event_id, room_id, value, true, &pub_key_map) + .handle_incoming_pdu( + &origin, + &event_id, + room_id, + value, + true, + &pub_key_map, + ) .await? .ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Could not accept incoming PDU as timeline event.", ))?; - drop(mutex_lock); + drop(federation_token); - let state_ids = services() - .rooms - .state_accessor - .state_full_ids(shortstatehash) - .await?; + let state_ids = + services().rooms.state_accessor.state_full_ids(shortstatehash).await?; let auth_chain_ids = services() .rooms .auth_chain @@ -1570,78 +1697,85 @@ async fn create_join_event( .rooms .state_cache .room_servers(room_id) - .filter_map(|r| r.ok()) + .filter_map(Result::ok) .filter(|server| &**server != services().globals.server_name()); services().sending.send_pdu(servers, &pdu_id)?; - Ok(create_join_event::v1::RoomState { + Ok(create_join_event::v2::RoomState { auth_chain: auth_chain_ids - .filter_map(|id| services().rooms.timeline.get_pdu_json(&id).ok().flatten()) + .filter_map(|id| { + services().rooms.timeline.get_pdu_json(&id).ok().flatten() + }) .map(PduEvent::convert_to_outgoing_federation_event) .collect(), state: state_ids .iter() - .filter_map(|(_, id)| services().rooms.timeline.get_pdu_json(id).ok().flatten()) + .filter_map(|(_, id)| { + services().rooms.timeline.get_pdu_json(id).ok().flatten() + }) 
.map(PduEvent::convert_to_outgoing_federation_event) .collect(), - event: None, // TODO: handle restricted joins + // TODO: handle restricted joins + event: None, + members_omitted: false, + servers_in_room: None, }) } /// # `PUT /_matrix/federation/v1/send_join/{roomId}/{eventId}` /// /// Submits a signed join event. -pub async fn create_join_event_v1_route( - body: Ruma, -) -> Result { - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); +#[allow(deprecated)] +pub(crate) async fn create_join_event_v1_route( + body: Ar, +) -> Result> { + let sender_servername = + body.sender_servername.as_ref().expect("server is authenticated"); - let room_state = create_join_event(sender_servername, &body.room_id, &body.pdu).await?; + let create_join_event::v2::RoomState { + auth_chain, + state, + event, + .. + } = create_join_event(sender_servername, &body.room_id, &body.pdu).await?; + let room_state = create_join_event::v1::RoomState { + auth_chain, + state, + event, + }; - Ok(create_join_event::v1::Response { room_state }) + Ok(Ra(create_join_event::v1::Response { + room_state, + })) } /// # `PUT /_matrix/federation/v2/send_join/{roomId}/{eventId}` /// /// Submits a signed join event. 
-pub async fn create_join_event_v2_route( - body: Ruma, -) -> Result { - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); +pub(crate) async fn create_join_event_v2_route( + body: Ar, +) -> Result> { + let sender_servername = + body.sender_servername.as_ref().expect("server is authenticated"); - let create_join_event::v1::RoomState { - auth_chain, - state, - event, - } = create_join_event(sender_servername, &body.room_id, &body.pdu).await?; - let room_state = create_join_event::v2::RoomState { - members_omitted: false, - auth_chain, - state, - event, - servers_in_room: None, - }; + let room_state = + create_join_event(sender_servername, &body.room_id, &body.pdu).await?; - Ok(create_join_event::v2::Response { room_state }) + Ok(Ra(create_join_event::v2::Response { + room_state, + })) } /// # `PUT /_matrix/federation/v2/invite/{roomId}/{eventId}` /// /// Invites a remote user to a room. -pub async fn create_invite_route( - body: Ruma, -) -> Result { - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); +#[allow(clippy::too_many_lines)] +pub(crate) async fn create_invite_route( + body: Ar, +) -> Result> { + let sender_servername = + body.sender_servername.as_ref().expect("server is authenticated"); services() .rooms @@ -1660,22 +1794,33 @@ pub async fn create_invite_route( "Server does not support this room version.", )); } + let room_version_rules = body + .room_version + .rules() + .expect("ruma should support all room versions we advertise"); - let mut signed_event = utils::to_canonical_object(&body.event) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invite event is invalid."))?; + let mut signed_event = + utils::to_canonical_object(&body.event).map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Invite event is invalid.", + ) + })?; ruma::signatures::hash_and_sign_event( services().globals.server_name().as_str(), 
services().globals.keypair(), &mut signed_event, - &body.room_version, + &room_version_rules.redaction, ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event."))?; + .map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event.") + })?; // Generate event id let event_id = EventId::parse(format!( "${}", - ruma::signatures::reference_hash(&signed_event, &body.room_version) + ruma::signatures::reference_hash(&signed_event, &room_version_rules) .expect("ruma can calculate reference hashes") )) .expect("ruma's reference hashes are valid event ids"); @@ -1696,7 +1841,9 @@ pub async fn create_invite_route( .clone() .into(), ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "sender is not a user id."))?; + .map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "sender is not a user id.") + })?; let invited_user: Box<_> = serde_json::from_value( signed_event @@ -1708,23 +1855,38 @@ pub async fn create_invite_route( .clone() .into(), ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "state_key is not a user id."))?; + .map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "state_key is not a user id.", + ) + })?; let mut invite_state = body.invite_room_state.clone(); let mut event: JsonObject = serde_json::from_str(body.event.get()) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event bytes."))?; + .map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid invite event bytes.", + ) + })?; event.insert("event_id".to_owned(), "$dummy".into()); - let pdu: PduEvent = serde_json::from_value(event.into()).map_err(|e| { - warn!("Invalid invite event: {}", e); - Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event.") - })?; + let pdu: PduEvent = + serde_json::from_value(event.into()).map_err(|error| { + warn!(%error, "Invalid invite event"); + Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event.") + })?; 
invite_state.push(pdu.to_stripped_state_event()); - // If we are active in the room, the remote server will notify us about the join via /send + // If we are active in the room, the remote server will notify us about the + // invite via m.room.member through /send. If we are not in the room, we + // need to manually record the invited state for clients' /sync through + // update_membership(), and send the invite pseudo-PDU to the affected + // appservices. if !services() .rooms .state_cache @@ -1733,24 +1895,43 @@ pub async fn create_invite_route( services().rooms.state_cache.update_membership( &body.room_id, &invited_user, - MembershipState::Invite, + &MembershipState::Invite, &sender, Some(invite_state), true, )?; + + for appservice in services().appservice.read().await.values() { + if appservice.is_user_match(&invited_user) { + appservice_server::send_request( + appservice.registration.clone(), + ruma::api::appservice::event::push_events::v1::Request { + events: vec![pdu.to_room_event()], + txn_id: + base64::engine::general_purpose::URL_SAFE_NO_PAD + .encode(utils::calculate_hash([pdu + .event_id() + .as_bytes()])) + .into(), + ephemeral: Vec::new(), + }, + ) + .await?; + } + } } - Ok(create_invite::v2::Response { + Ok(Ra(create_invite::v2::Response { event: PduEvent::convert_to_outgoing_federation_event(signed_event), - }) + })) } /// # `GET /_matrix/federation/v1/user/devices/{userId}` /// /// Gets information on all devices of the user. 
-pub async fn get_devices_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_devices_route( + body: Ar, +) -> Result> { if body.user_id.server_name() != services().globals.server_name() { return Err(Error::BadRequest( ErrorKind::InvalidParam, @@ -1758,12 +1939,10 @@ pub async fn get_devices_route( )); } - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); + let sender_servername = + body.sender_servername.as_ref().expect("server is authenticated"); - Ok(get_devices::v1::Response { + Ok(Ra(get_devices::v1::Response { user_id: body.user_id.clone(), stream_id: services() .users @@ -1774,7 +1953,7 @@ pub async fn get_devices_route( devices: services() .users .all_devices_metadata(&body.user_id) - .filter_map(|r| r.ok()) + .filter_map(Result::ok) .filter_map(|metadata| { Some(UserDevice { keys: services() @@ -1786,44 +1965,42 @@ pub async fn get_devices_route( }) }) .collect(), - master_key: services().users.get_master_key(None, &body.user_id, &|u| { - u.server_name() == sender_servername - })?, - self_signing_key: services() - .users - .get_self_signing_key(None, &body.user_id, &|u| { - u.server_name() == sender_servername - })?, - }) + master_key: services().users.get_master_key( + None, + &body.user_id, + &|u| u.server_name() == sender_servername, + )?, + self_signing_key: services().users.get_self_signing_key( + None, + &body.user_id, + &|u| u.server_name() == sender_servername, + )?, + })) } /// # `GET /_matrix/federation/v1/query/directory` /// /// Resolve a room alias to a room id. -pub async fn get_room_information_route( - body: Ruma, -) -> Result { - let room_id = services() - .rooms - .alias - .resolve_local_alias(&body.room_alias)? 
- .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Room alias not found.", - ))?; +pub(crate) async fn get_room_information_route( + body: Ar, +) -> Result> { + let room_id = + services().rooms.alias.resolve_local_alias(&body.room_alias)?.ok_or( + Error::BadRequest(ErrorKind::NotFound, "Room alias not found."), + )?; - Ok(get_room_information::v1::Response { + Ok(Ra(get_room_information::v1::Response { room_id, servers: vec![services().globals.server_name().to_owned()], - }) + })) } /// # `GET /_matrix/federation/v1/query/profile` /// /// Gets information on a profile. -pub async fn get_profile_information_route( - body: Ruma, -) -> Result { +pub(crate) async fn get_profile_information_route( + body: Ar, +) -> Result> { if body.user_id.server_name() != services().globals.server_name() { return Err(Error::BadRequest( ErrorKind::InvalidParam, @@ -1837,11 +2014,11 @@ pub async fn get_profile_information_route( match &body.field { Some(ProfileField::DisplayName) => { - displayname = services().users.displayname(&body.user_id)? + displayname = services().users.displayname(&body.user_id)?; } Some(ProfileField::AvatarUrl) => { avatar_url = services().users.avatar_url(&body.user_id)?; - blurhash = services().users.blurhash(&body.user_id)? + blurhash = services().users.blurhash(&body.user_id)?; } // TODO: what to do with custom Some(_) => {} @@ -1852,17 +2029,19 @@ pub async fn get_profile_information_route( } } - Ok(get_profile_information::v1::Response { - blurhash, + Ok(Ra(get_profile_information::v1::Response { displayname, avatar_url, - }) + blurhash, + })) } /// # `POST /_matrix/federation/v1/user/keys/query` /// /// Gets devices and identity keys for the given users. 
-pub async fn get_keys_route(body: Ruma) -> Result { +pub(crate) async fn get_keys_route( + body: Ar, +) -> Result> { if body .device_keys .iter() @@ -1879,19 +2058,19 @@ pub async fn get_keys_route(body: Ruma) -> Result, -) -> Result { +pub(crate) async fn claim_keys_route( + body: Ar, +) -> Result> { if body .one_time_keys .iter() @@ -1905,9 +2084,89 @@ pub async fn claim_keys_route( let result = claim_keys_helper(&body.one_time_keys).await?; - Ok(claim_keys::v1::Response { + Ok(Ra(claim_keys::v1::Response { one_time_keys: result.one_time_keys, - }) + })) +} + +/// # `GET /_matrix/federation/v1/media/download/{mediaId}` +/// +/// Downloads media owned by a remote homeserver. +pub(crate) async fn media_download_route( + body: Ar, +) -> Result> { + let mxc = MxcData::new(services().globals.server_name(), &body.media_id)?; + let Some(( + crate::service::media::FileMeta { + content_disposition, + content_type, + }, + file, + )) = services().media.get(mxc.into()).await? + else { + return Err(Error::BadRequest( + ErrorKind::NotYetUploaded, + "Media not found", + )); + }; + + let content_disposition = content_disposition.and_then(|s| { + s.parse().inspect_err( + |error| warn!(%error, "Invalid Content-Disposition in database"), + ) + .ok() + }); + + Ok(Ra(authenticated_media::get_content::v1::Response { + metadata: authenticated_media::ContentMetadata {}, + content: authenticated_media::FileOrLocation::File( + authenticated_media::Content { + file, + content_type, + content_disposition, + }, + ), + })) +} + +/// # `GET /_matrix/federation/v1/media/thumbnail/{mediaId}` +/// +/// Downloads a thumbnail from a remote homeserver. 
+pub(crate) async fn media_thumbnail_route( + body: Ar, +) -> Result> { + let mxc = MxcData::new(services().globals.server_name(), &body.media_id)?; + let width = body.width.try_into().map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid.") + })?; + let height = body.height.try_into().map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Height is invalid.") + })?; + + let Some(( + crate::service::media::FileMeta { + content_type, + .. + }, + file, + )) = services().media.get_thumbnail(mxc.into(), width, height).await? + else { + return Err(Error::BadRequest( + ErrorKind::NotYetUploaded, + "Media not found", + )); + }; + + Ok(Ra(authenticated_media::get_content_thumbnail::v1::Response { + metadata: authenticated_media::ContentMetadata {}, + content: authenticated_media::FileOrLocation::File( + authenticated_media::Content { + file, + content_type, + content_disposition: None, + }, + ), + })) } #[cfg(test)] @@ -1943,7 +2202,7 @@ mod tests { assert_eq!( add_port_to_hostname("example.com"), FedDest::Named(String::from("example.com"), String::from(":8448")) - ) + ); } #[test] @@ -1951,6 +2210,6 @@ mod tests { assert_eq!( add_port_to_hostname("example.com:1337"), FedDest::Named(String::from("example.com"), String::from(":1337")) - ) + ); } } diff --git a/src/api/well_known.rs b/src/api/well_known.rs new file mode 100644 index 00000000..edb5a347 --- /dev/null +++ b/src/api/well_known.rs @@ -0,0 +1,41 @@ +#![warn(missing_docs, clippy::missing_docs_in_private_items)] + +//! 
Handle requests for `/.well-known/matrix/...` files + +use http::StatusCode; +use ruma::api::{ + client::discovery::discover_homeserver as client, + federation::discovery::discover_homeserver as server, +}; + +use crate::{services, Ar, Ra}; + +/// Handler for `/.well-known/matrix/server` +pub(crate) async fn server( + _: Ar, +) -> Result, StatusCode> { + let Some(authority) = + services().globals.config.server_discovery.server.authority.clone() + else { + return Err(StatusCode::NOT_FOUND); + }; + + if authority == services().globals.config.server_name { + // Delegation isn't needed in this case + return Err(StatusCode::NOT_FOUND); + } + + Ok(Ra(server::Response::new(authority))) +} + +/// Handler for `/.well-known/matrix/client` +pub(crate) async fn client(_: Ar) -> Ra { + // I wish ruma used an actual URL type instead of `String` + let base_url = + services().globals.config.server_discovery.client.base_url.to_string(); + + Ra(client::Response { + homeserver: client::HomeserverInfo::new(base_url.clone()), + identity_server: None, + }) +} diff --git a/src/clap.rs b/src/clap.rs deleted file mode 100644 index 170d2a17..00000000 --- a/src/clap.rs +++ /dev/null @@ -1,27 +0,0 @@ -//! Integration with `clap` - -use clap::Parser; - -/// Returns the current version of the crate with extra info if supplied -/// -/// Set the environment variable `CONDUIT_VERSION_EXTRA` to any UTF-8 string to -/// include it in parenthesis after the SemVer version. A common value are git -/// commit hashes. 
-fn version() -> String { - let cargo_pkg_version = env!("CARGO_PKG_VERSION"); - - match option_env!("CONDUIT_VERSION_EXTRA") { - Some(x) => format!("{} ({})", cargo_pkg_version, x), - None => cargo_pkg_version.to_owned(), - } -} - -/// Command line arguments -#[derive(Parser)] -#[clap(about, version = version())] -pub struct Args {} - -/// Parse command line arguments into structured data -pub fn parse() -> Args { - Args::parse() -} diff --git a/src/cli.rs b/src/cli.rs new file mode 100644 index 00000000..4f4c85cc --- /dev/null +++ b/src/cli.rs @@ -0,0 +1,124 @@ +//! Integration with `clap` +//! +//! CLI argument structs are defined in this module. Execution logic for each +//! command goes in a submodule. + +use std::path::PathBuf; + +use clap::{Parser, Subcommand}; + +use crate::{ + config::{default_tracing_filter, EnvFilterClone, LogFormat}, + error, observability, +}; + +mod check_config; +mod serve; + +/// Command line arguments +#[derive(Parser)] +#[clap( + about, + version = crate::version(), +)] +pub(crate) struct Args { + #[clap(subcommand)] + pub(crate) command: Command, +} + +#[derive(Subcommand)] +pub(crate) enum Command { + /// Run the server. + Serve(ServeArgs), + + /// Check the configuration file for syntax and semantic errors. + CheckConfig(CheckConfigArgs), +} + +#[derive(clap::Args)] +pub(crate) struct CheckConfigArgs { + #[clap(flatten)] + config: ConfigArg, + + #[clap(flatten)] + observability: ObservabilityArgs, + + /// Supply this option if the check is being performed in a sandbox. + /// + /// This causes certain checks that can only be run against the actual + /// installation to be skipped. + #[arg(long, short)] + sandboxed: bool, +} + +/// Wrapper for the `--config` arg. +/// +/// This exists to centralize the `mut_arg` code that sets the help value based +/// on runtime information. 
+#[derive(clap::Args)] +#[clap(mut_arg("config", |x| { + let help = "Set the path to the configuration file"; + x.help(help).long_help(format!( + "{}\n\nIf this option is specified, the provided value is used \ + as-is.\n\nIf this option is not specified, then the XDG Base \ + Directory Specification is followed, searching for the path `{}` \ + in the configuration directories. + ", + help, + crate::config::DEFAULT_PATH.display(), + )) +}))] +pub(crate) struct ConfigArg { + /// Path to the configuration file + #[clap(long, short)] + pub(crate) config: Option, +} + +/// Observability arguments for CLI subcommands +#[derive(clap::Args)] +struct ObservabilityArgs { + /// Log format + #[clap(long, default_value_t = LogFormat::Full)] + log_format: LogFormat, + + /// Log filter + /// + /// For information about the syntax, see here: + /// + #[clap(long, default_value_t = default_tracing_filter())] + log_filter: EnvFilterClone, +} + +#[derive(clap::Args)] +pub(crate) struct ServeArgs { + #[clap(flatten)] + pub(crate) config: ConfigArg, +} + +impl Args { + pub(crate) async fn run(self) -> Result<(), error::Main> { + if let Some((format, filter)) = self.command.cli_observability_args() { + observability::init_for_cli(format, filter.into())?; + } + + match self.command { + Command::Serve(args) => serve::run(args).await?, + Command::CheckConfig(args) => check_config::run(args).await?, + } + Ok(()) + } +} + +impl Command { + fn cli_observability_args(&self) -> Option<(LogFormat, EnvFilterClone)> { + // All subcommands other than `serve` should return `Some`. Keep these + // match arms sorted by the enum variant name. 
+ match self { + Command::CheckConfig(args) => Some(( + args.observability.log_format, + args.observability.log_filter.clone(), + )), + Command::Serve(_) => None, + } + } +} diff --git a/src/cli/check_config.rs b/src/cli/check_config.rs new file mode 100644 index 00000000..943491bb --- /dev/null +++ b/src/cli/check_config.rs @@ -0,0 +1,12 @@ +use tracing::info; + +use crate::{cli::CheckConfigArgs, config, error}; + +pub(crate) async fn run( + args: CheckConfigArgs, +) -> Result<(), error::CheckConfigCommand> { + let _config = + config::load(args.config.config.as_ref(), args.sandboxed).await?; + info!("Configuration looks good"); + Ok(()) +} diff --git a/src/cli/serve.rs b/src/cli/serve.rs new file mode 100644 index 00000000..f0e0f969 --- /dev/null +++ b/src/cli/serve.rs @@ -0,0 +1,999 @@ +use std::{ + collections::HashSet, convert::Infallible, future::Future, + net::SocketAddr as IpSocketAddr, + os::unix::net::SocketAddr as UnixSocketAddr, sync::atomic, time::Duration, +}; + +use axum::{ + extract::{ + connect_info::{Connected, IntoMakeServiceWithConnectInfo}, + ConnectInfo, DefaultBodyLimit, FromRequestParts, MatchedPath, + }, + middleware::AddExtension, + response::IntoResponse, + routing::{any, get, on, MethodFilter, Route}, + Router, +}; +use axum_server::{ + accept::Accept, + bind, + service::SendService, + tls_rustls::{RustlsAcceptor, RustlsConfig}, + Address, Server, +}; +use http::{ + header::{self, HeaderName}, + Method, StatusCode, Uri, +}; +use hyper::body::Incoming; +use proxy_header::ProxyHeader; +use ruma::api::{ + client::{ + error::{Error as RumaError, ErrorBody, ErrorKind}, + uiaa::UiaaResponse, + }, + federation::discovery::get_server_version, + IncomingRequest, +}; +use strum::Display; +use tokio::{ + io::{AsyncRead, AsyncWrite}, + signal, + task::JoinSet, +}; +use tower::{Layer, Service, ServiceBuilder}; +use tower_http::{ + cors::{self, CorsLayer}, + trace::TraceLayer, + ServiceBuilderExt as _, +}; +use tracing::{debug, info, info_span, warn, 
Instrument}; + +use super::ServeArgs; +use crate::{ + api::{ + client_server, + ruma_wrapper::{Ar, Ra}, + server_server::{self, AllowLoopbackRequests, LogRequestError}, + well_known, + }, + config::{self, Config, ListenComponent, ListenConfig, ListenTransport}, + database::KeyValueDatabase, + error, observability, services, set_application_state, + utils::{ + self, + error::{Error, Result}, + proxy_protocol::{ProxyAcceptor, ProxyAcceptorConfig}, + }, + ApplicationState, Services, +}; + +pub(crate) async fn run(args: ServeArgs) -> Result<(), error::ServeCommand> { + use error::ServeCommand as Error; + + let config = config::load(args.config.config.as_ref(), false).await?; + + rustls::crypto::ring::default_provider() + .install_default() + .expect("rustls default crypto provider should not be already set"); + + let (_guard, reload_handles) = observability::init(&config)?; + + info!("Loading database"); + let db = Box::leak(Box::new( + KeyValueDatabase::load_or_create(&config) + .map_err(Error::DatabaseError)?, + )); + + // This struct will remove old Unix sockets once it's dropped. + let _clean_up_socks = CleanUpUnixSockets(config.listen.clone()); + + Services::new(db, config, Some(reload_handles)) + .map_err(Error::InitializeServices)? + .install(); + + services().globals.err_if_server_name_changed()?; + + db.apply_migrations().await.map_err(Error::DatabaseError)?; + + info!("Starting background tasks"); + services().admin.start_handler(); + services().sending.start_handler(); + KeyValueDatabase::start_cleanup_task(); + services().globals.set_emergency_access(); + + info!("Starting server"); + run_server().await?; + + Ok(()) +} + +struct CleanUpUnixSockets(Vec); + +impl Drop for CleanUpUnixSockets { + fn drop(&mut self) { + // Remove old Unix sockets + for listen in &self.0 { + if let ListenTransport::Unix { + path, + .. 
+ } = &listen.transport + { + info!( + path = path.display().to_string(), + "Removing Unix socket" + ); + if let Err(error) = std::fs::remove_file(path) { + warn!(%error, "Couldn't remove Unix socket"); + } + } + } + } +} + +#[tracing::instrument] +async fn federation_self_test() -> Result<()> { + let response = server_server::send_request( + &services().globals.config.server_name, + get_server_version::v1::Request {}, + LogRequestError::Yes, + AllowLoopbackRequests::Yes, + ) + .await?; + + if response + .server + .as_ref() + .is_none_or(|s| s.name.as_deref() != Some(env!("CARGO_PKG_NAME"))) + { + error!(?response, "unexpected server version"); + return Err(Error::BadConfig( + "Got unexpected version from our own version endpoint", + )); + } + + Ok(()) +} + +// A trait we'll implement on `axum_server::Handle` in order to be able to +// shutdown handles regardless of their generics. +trait ServerHandle: Send { + fn shutdown(&self, timeout: Option); +} + +impl ServerHandle for axum_server::Handle { + fn shutdown(&self, timeout: Option) { + self.graceful_shutdown(timeout); + } +} + +/// This type is needed to allow us to find out where incoming connections came +/// from. Before Unix socket support, we could simply use `IpSocketAddr` here, +/// but this is no longer possible. +#[derive(Clone, Display)] +enum AddrConnectInfo { + #[strum(to_string = "{0}")] + Ip(IpSocketAddr), + + #[strum(to_string = "[unix socket]")] + UnixSocket, + + #[strum(to_string = "[unknown]")] + Unknown, +} + +impl Connected for AddrConnectInfo { + fn connect_info(target: IpSocketAddr) -> Self { + Self::Ip(target) + } +} + +impl Connected for AddrConnectInfo { + fn connect_info(_target: UnixSocketAddr) -> Self { + // The `UnixSocketAddr` we get here is one that we can't recover the + // path from (`as_pathname` returns `None`), so there's no point + // in saving it (we only use all this for logging). 
+ Self::UnixSocket + } +} + +struct ServerSpawner<'cfg, M> { + config: &'cfg Config, + middlewares: M, + + tls_config: Option, + proxy_config: ProxyAcceptorConfig, + servers: JoinSet<(ListenConfig, std::io::Result<()>)>, + handles: Vec>, +} + +impl<'cfg, M> ServerSpawner<'cfg, M> +where + M: Layer + Clone + Send + Sync + 'static, + M::Service: Service< + axum::extract::Request, + Response = axum::response::Response, + Error = Infallible, + > + Clone + + Send + + Sync + + 'static, + >::Future: Send + 'static, +{ + async fn new( + config: &'cfg Config, + middlewares: M, + ) -> Result { + let tls_config = if let Some(tls) = &config.tls { + Some( + RustlsConfig::from_pem_file(&tls.certs, &tls.key) + .await + .map_err(|err| error::Serve::LoadCerts { + certs: tls.certs.clone(), + key: tls.key.clone(), + err, + })?, + ) + } else { + None + }; + + let proxy_config = ProxyAcceptorConfig::default(); + + Ok(Self { + config, + middlewares, + tls_config, + proxy_config, + servers: JoinSet::new(), + handles: Vec::new(), + }) + } + + /// Returns a function that transforms a lower-layer acceptor into a TLS + /// acceptor. + fn tls_acceptor_factory( + &self, + listen: &ListenConfig, + ) -> Result RustlsAcceptor, error::Serve> { + let config = self + .tls_config + .clone() + .ok_or_else(|| error::Serve::NoTlsCerts(listen.clone()))?; + + Ok(|inner| RustlsAcceptor::new(config).acceptor(inner)) + } + + /// Returns a function that transforms a lower-layer acceptor into a Proxy + /// Protocol acceptor. 
+ fn proxy_acceptor_factory(&self) -> impl FnOnce(A) -> ProxyAcceptor { + let config = self.proxy_config.clone(); + + |inner| ProxyAcceptor::new(inner, config) + } + + fn spawn_server_inner( + &mut self, + listen: ListenConfig, + server: Server, + app: IntoMakeServiceWithConnectInfo, + ) where + AddrConnectInfo: Connected, + Addr: Address + Send + 'static, + Addr::Stream: Send, + Addr::Listener: Send, + A: Accept< + Addr::Stream, + AddExtension>, + > + Clone + + Send + + Sync + + 'static, + A::Stream: AsyncRead + AsyncWrite + Unpin + Send + Sync, + A::Service: SendService> + Send, + A::Future: Send, + { + let handle = axum_server::Handle::new(); + let server = server.handle(handle.clone()).serve(app); + self.servers.spawn(async move { + let result = server.await; + + (listen, result) + }); + self.handles.push(Box::new(handle)); + } + + fn spawn_server( + &mut self, + listen: ListenConfig, + ) -> Result<(), error::Serve> { + let app = routes(self.config, &listen.components) + .layer(self.middlewares.clone()) + .into_make_service_with_connect_info::(); + + match &listen.transport { + ListenTransport::Tcp { + address, + port, + tls, + proxy_protocol, + } => { + let addr = IpSocketAddr::from((*address, *port)); + let server = bind(addr); + + match (tls, proxy_protocol) { + (false, false) => { + self.spawn_server_inner(listen, server, app); + } + (false, true) => { + let server = server.map(self.proxy_acceptor_factory()); + self.spawn_server_inner(listen, server, app); + } + (true, false) => { + let server = + server.map(self.tls_acceptor_factory(&listen)?); + self.spawn_server_inner(listen, server, app); + } + (true, true) => { + let server = server + .map(self.proxy_acceptor_factory()) + .map(self.tls_acceptor_factory(&listen)?); + self.spawn_server_inner(listen, server, app); + } + } + + Ok(()) + } + ListenTransport::Unix { + path, + proxy_protocol, + } => { + let addr = match UnixSocketAddr::from_pathname(path) { + Ok(addr) => addr, + Err(e) => { + // We can't use 
`map_err` here, as that would move + // `listen` into a closure, preventing us from using it + // later. + return Err(error::Serve::Listen(e, listen)); + } + }; + let server = bind(addr); + + if *proxy_protocol { + let server = server.map(self.proxy_acceptor_factory()); + self.spawn_server_inner(listen, server, app); + } else { + self.spawn_server_inner(listen, server, app); + } + + Ok(()) + } + } + } +} + +#[allow(clippy::too_many_lines)] +async fn run_server() -> Result<(), error::Serve> { + use error::Serve as Error; + + let config = &services().globals.config; + + let x_requested_with = HeaderName::from_static("x-requested-with"); + + let middlewares = ServiceBuilder::new() + .sensitive_headers([header::AUTHORIZATION]) + .layer(axum::middleware::from_fn(spawn_task)) + .layer( + TraceLayer::new_for_http() + .make_span_with(|request: &http::Request<_>| { + let endpoint = if let Some(endpoint) = + request.extensions().get::() + { + endpoint.as_str() + } else { + request.uri().path() + }; + + let method = request.method(); + + let source_address = request + .extensions() + .get::>() + .map_or_else( + || { + request + .extensions() + .get::>() + .map(|ConnectInfo(addr)| addr.clone()) + }, + |h| { + h.proxied_address().map(|addr| { + AddrConnectInfo::Ip(addr.source) + }) + }, + ) + .unwrap_or(AddrConnectInfo::Unknown); + + tracing::info_span!( + "http_request", + otel.name = format!("{method} {endpoint}"), + %method, + %endpoint, + %source_address, + ) + }) + .on_request( + |request: &http::Request<_>, _span: &tracing::Span| { + // can be enabled selectively using `filter = + // grapevine[incoming_request_curl]=trace` in config + tracing::trace_span!("incoming_request_curl").in_scope( + || { + tracing::trace!( + cmd = utils::curlify(request), + "curl command line for incoming request \ + (guessed hostname)" + ); + }, + ); + }, + ), + ) + .layer(axum::middleware::from_fn(unrecognized_method)) + .layer( + CorsLayer::new() + .allow_origin(cors::Any) + .allow_methods([ + 
Method::GET, + Method::POST, + Method::PUT, + Method::DELETE, + Method::OPTIONS, + ]) + .allow_headers([ + header::ORIGIN, + x_requested_with, + header::CONTENT_TYPE, + header::ACCEPT, + header::AUTHORIZATION, + ]) + .max_age(Duration::from_secs(86400)), + ) + .layer(DefaultBodyLimit::max( + config + .max_request_size + .try_into() + .expect("failed to convert max request size"), + )) + .layer(axum::middleware::from_fn(observability::http_metrics_layer)) + .layer(axum::middleware::from_fn(observability::traceresponse_layer)); + + let mut spawner = ServerSpawner::new(config, middlewares).await?; + + if config.listen.is_empty() { + return Err(Error::NoListeners); + } + + for listen in &config.listen { + info!(listener = %listen, "Listening for incoming traffic"); + spawner.spawn_server(listen.clone())?; + } + + tokio::spawn(handle_signals(spawner.tls_config, spawner.handles)); + + if config.federation.enable && config.federation.self_test { + federation_self_test() + .await + .map_err(error::Serve::FederationSelfTestFailed)?; + debug!("Federation self-test completed successfully"); + } + + set_application_state(ApplicationState::Ready); + + while let Some(result) = spawner.servers.join_next().await { + let (listen, result) = + result.expect("should be able to join server task"); + result.map_err(|err| Error::Listen(err, listen))?; + } + + Ok(()) +} + +/// Ensures the request runs in a new tokio thread. +/// +/// The axum request handler task gets cancelled if the connection is shut down; +/// by spawning our own task, processing continue after the client disconnects. 
+async fn spawn_task( + req: axum::extract::Request, + next: axum::middleware::Next, +) -> std::result::Result { + if services().globals.shutdown.load(atomic::Ordering::Relaxed) { + return Err(StatusCode::SERVICE_UNAVAILABLE); + } + tokio::spawn(next.run(req)) + .await + .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR) +} + +async fn unrecognized_method( + req: axum::extract::Request, + next: axum::middleware::Next, +) -> std::result::Result { + let method = req.method().clone(); + let uri = req.uri().clone(); + let inner = next.run(req).await; + if inner.status() == StatusCode::METHOD_NOT_ALLOWED { + warn!(%method, %uri, "Method not allowed"); + return Ok(Ra(UiaaResponse::MatrixError(RumaError { + body: ErrorBody::Standard { + kind: ErrorKind::Unrecognized, + message: "M_UNRECOGNIZED: Unrecognized request".to_owned(), + }, + status_code: StatusCode::METHOD_NOT_ALLOWED, + })) + .into_response()); + } + Ok(inner) +} + +/// Routes for legacy unauthenticated `/_matrix/media/*` APIs (used by both +/// clients and federation) +fn legacy_media_routes(config: &Config) -> Router { + use client_server as c2s; + + let router = Router::new(); + + // deprecated, but unproblematic + let router = router.ruma_route(c2s::get_media_config_legacy_route); + + if config.media.allow_unauthenticated_access { + router + .ruma_route(c2s::get_content_legacy_route) + .ruma_route(c2s::get_content_as_filename_legacy_route) + .ruma_route(c2s::get_content_thumbnail_legacy_route) + } else { + router + .route( + "/_matrix/media/v3/download/*path", + any(unauthenticated_media_disabled), + ) + .route( + "/_matrix/media/v3/thumbnail/*path", + any(unauthenticated_media_disabled), + ) + } +} + +#[allow(clippy::too_many_lines)] +fn client_routes() -> Router { + use client_server as c2s; + + let router = Router::new() + .ruma_route(c2s::get_supported_versions_route) + .ruma_route(c2s::get_register_available_route) + .ruma_route(c2s::register_route) + .ruma_route(c2s::get_login_types_route) + 
.ruma_route(c2s::login_route) + .ruma_route(c2s::whoami_route) + .ruma_route(c2s::logout_route) + .ruma_route(c2s::logout_all_route) + .ruma_route(c2s::change_password_route) + .ruma_route(c2s::deactivate_route) + .ruma_route(c2s::third_party_route) + .ruma_route(c2s::request_3pid_management_token_via_email_route) + .ruma_route(c2s::request_3pid_management_token_via_msisdn_route) + .ruma_route(c2s::get_capabilities_route) + .ruma_route(c2s::get_pushrules_all_route) + .ruma_route(c2s::set_pushrule_route) + .ruma_route(c2s::get_pushrule_route) + .ruma_route(c2s::set_pushrule_enabled_route) + .ruma_route(c2s::get_pushrule_enabled_route) + .ruma_route(c2s::get_pushrule_actions_route) + .ruma_route(c2s::set_pushrule_actions_route) + .ruma_route(c2s::delete_pushrule_route) + .ruma_route(c2s::get_room_event_route) + .ruma_route(c2s::get_room_aliases_route) + .ruma_route(c2s::get_filter_route) + .ruma_route(c2s::create_filter_route) + .ruma_route(c2s::set_global_account_data_route) + .ruma_route(c2s::set_room_account_data_route) + .ruma_route(c2s::get_global_account_data_route) + .ruma_route(c2s::get_room_account_data_route) + .ruma_route(c2s::set_displayname_route) + .ruma_route(c2s::get_displayname_route) + .ruma_route(c2s::set_avatar_url_route) + .ruma_route(c2s::get_avatar_url_route) + .ruma_route(c2s::get_profile_route) + .ruma_route(c2s::upload_keys_route) + .ruma_route(c2s::get_keys_route) + .ruma_route(c2s::claim_keys_route) + .ruma_route(c2s::create_backup_version_route) + .ruma_route(c2s::update_backup_version_route) + .ruma_route(c2s::delete_backup_version_route) + .ruma_route(c2s::get_latest_backup_info_route) + .ruma_route(c2s::get_backup_info_route) + .ruma_route(c2s::add_backup_keys_route) + .ruma_route(c2s::add_backup_keys_for_room_route) + .ruma_route(c2s::add_backup_keys_for_session_route) + .ruma_route(c2s::delete_backup_keys_for_room_route) + .ruma_route(c2s::delete_backup_keys_for_session_route) + .ruma_route(c2s::delete_backup_keys_route) + 
.ruma_route(c2s::get_backup_keys_for_room_route) + .ruma_route(c2s::get_backup_keys_for_session_route) + .ruma_route(c2s::get_backup_keys_route) + .ruma_route(c2s::set_read_marker_route) + .ruma_route(c2s::create_receipt_route) + .ruma_route(c2s::create_typing_event_route) + .ruma_route(c2s::create_room_route) + .ruma_route(c2s::redact_event_route) + .ruma_route(c2s::report_event_route) + .ruma_route(c2s::create_alias_route) + .ruma_route(c2s::delete_alias_route) + .ruma_route(c2s::get_alias_route) + .ruma_route(c2s::join_room_by_id_route) + .ruma_route(c2s::join_room_by_id_or_alias_route) + .ruma_route(c2s::joined_members_route) + .ruma_route(c2s::leave_room_route) + .ruma_route(c2s::forget_room_route) + .ruma_route(c2s::joined_rooms_route) + .ruma_route(c2s::kick_user_route) + .ruma_route(c2s::ban_user_route) + .ruma_route(c2s::unban_user_route) + .ruma_route(c2s::invite_user_route) + .ruma_route(c2s::set_room_visibility_route) + .ruma_route(c2s::get_room_visibility_route) + .ruma_route(c2s::get_public_rooms_route) + .ruma_route(c2s::get_public_rooms_filtered_route) + .ruma_route(c2s::search_users_route) + .ruma_route(c2s::get_member_events_route) + .ruma_route(c2s::get_protocols_route) + .ruma_route(c2s::send_message_event_route) + .ruma_route(c2s::send_state_event_for_key_route) + .ruma_route(c2s::get_state_events_route) + .ruma_route(c2s::get_state_events_for_key_route) + .ruma_route(c2s::v3::sync_events_route) + .ruma_route(c2s::get_context_route) + .ruma_route(c2s::get_message_events_route) + .ruma_route(c2s::search_events_route) + .ruma_route(c2s::turn_server_route) + .ruma_route(c2s::send_event_to_device_route); + + // authenticated media + let router = router + .ruma_route(c2s::get_media_config_route) + .ruma_route(c2s::create_content_route) + .ruma_route(c2s::get_content_route) + .ruma_route(c2s::get_content_as_filename_route) + .ruma_route(c2s::get_content_thumbnail_route); + + let router = router + .ruma_route(c2s::get_devices_route) + 
.ruma_route(c2s::get_device_route) + .ruma_route(c2s::update_device_route) + .ruma_route(c2s::delete_device_route) + .ruma_route(c2s::delete_devices_route) + .ruma_route(c2s::get_tags_route) + .ruma_route(c2s::update_tag_route) + .ruma_route(c2s::delete_tag_route) + .ruma_route(c2s::upload_signing_keys_route) + .ruma_route(c2s::upload_signatures_route) + .ruma_route(c2s::get_key_changes_route) + .ruma_route(c2s::get_pushers_route) + .ruma_route(c2s::set_pushers_route) + .ruma_route(c2s::upgrade_room_route) + .ruma_route(c2s::get_threads_route) + .ruma_route(c2s::get_relating_events_with_rel_type_and_event_type_route) + .ruma_route(c2s::get_relating_events_with_rel_type_route) + .ruma_route(c2s::get_relating_events_route) + .ruma_route(c2s::get_hierarchy_route); + + // Ruma doesn't have support for multiple paths for a single endpoint yet, + // and these routes share one Ruma request / response type pair with + // {get,send}_state_event_for_key_route. These two endpoints also allow + // trailing slashes. 
+ let router = router + .route( + "/_matrix/client/r0/rooms/:room_id/state/:event_type", + get(c2s::get_state_events_for_empty_key_route) + .put(c2s::send_state_event_for_empty_key_route), + ) + .route( + "/_matrix/client/v3/rooms/:room_id/state/:event_type", + get(c2s::get_state_events_for_empty_key_route) + .put(c2s::send_state_event_for_empty_key_route), + ) + .route( + "/_matrix/client/r0/rooms/:room_id/state/:event_type/", + get(c2s::get_state_events_for_empty_key_route) + .put(c2s::send_state_event_for_empty_key_route), + ) + .route( + "/_matrix/client/v3/rooms/:room_id/state/:event_type/", + get(c2s::get_state_events_for_empty_key_route) + .put(c2s::send_state_event_for_empty_key_route), + ); + + router + .route( + "/_matrix/client/r0/rooms/:room_id/initialSync", + get(initial_sync), + ) + .route( + "/_matrix/client/v3/rooms/:room_id/initialSync", + get(initial_sync), + ) +} + +fn federation_routes(config: &Config) -> Router { + use server_server as s2s; + + if config.federation.enable { + Router::new() + .ruma_route(s2s::get_server_version_route) + .route("/_matrix/key/v2/server", get(s2s::get_server_keys_route)) + .route( + "/_matrix/key/v2/server/:key_id", + get(s2s::get_server_keys_deprecated_route), + ) + .ruma_route(s2s::get_public_rooms_route) + .ruma_route(s2s::get_public_rooms_filtered_route) + .ruma_route(s2s::send_transaction_message_route) + .ruma_route(s2s::get_event_route) + .ruma_route(s2s::get_backfill_route) + .ruma_route(s2s::get_missing_events_route) + .ruma_route(s2s::get_event_authorization_route) + .ruma_route(s2s::get_room_state_route) + .ruma_route(s2s::get_room_state_ids_route) + .ruma_route(s2s::create_join_event_template_route) + .ruma_route(s2s::create_join_event_v1_route) + .ruma_route(s2s::create_join_event_v2_route) + .ruma_route(s2s::create_invite_route) + .ruma_route(s2s::get_devices_route) + .ruma_route(s2s::get_room_information_route) + .ruma_route(s2s::get_profile_information_route) + .ruma_route(s2s::get_keys_route) + 
.ruma_route(s2s::claim_keys_route) + .ruma_route(s2s::media_download_route) + .ruma_route(s2s::media_thumbnail_route) + } else { + Router::new() + .route("/_matrix/federation/*path", any(federation_disabled)) + .route("/_matrix/key/*path", any(federation_disabled)) + } +} + +fn metrics_routes(config: &Config) -> Router { + if config.observability.metrics.enable { + Router::new().route( + "/metrics", + get(|| async { observability::METRICS.export() }), + ) + } else { + Router::new() + } +} + +fn well_known_routes() -> Router { + Router::new() + .route("/.well-known/matrix/client", get(well_known::client)) + .route("/.well-known/matrix/server", get(well_known::server)) +} + +fn routes(config: &Config, components: &HashSet) -> Router { + let mut router = Router::new(); + for &component in components { + router = router.merge(match component { + ListenComponent::Client => client_routes(), + ListenComponent::Federation => federation_routes(config), + ListenComponent::Metrics => metrics_routes(config), + ListenComponent::WellKnown => well_known_routes(), + }); + } + + if components.contains(&ListenComponent::Client) + || components.contains(&ListenComponent::Federation) + { + router = router.merge(legacy_media_routes(config)); + } + + router.route("/", get(it_works)).fallback(not_found) +} + +async fn reload_tls_config( + tls_config: &RustlsConfig, +) -> Result<(), error::Serve> { + let config = services() + .globals + .config + .tls + .as_ref() + .expect("TLS config should exist if TLS listener exists"); + + tls_config.reload_from_pem_file(&config.certs, &config.key).await.map_err( + |err| error::Serve::LoadCerts { + certs: config.certs.clone(), + key: config.key.clone(), + err, + }, + )?; + + Ok(()) +} + +async fn handle_signals( + tls_config: Option, + handles: Vec>, +) { + #[cfg(unix)] + async fn wait_signal(sig: signal::unix::SignalKind) { + signal::unix::signal(sig) + .expect("failed to install signal handler") + .recv() + .await; + } + + #[cfg(unix)] + let 
terminate = || wait_signal(signal::unix::SignalKind::terminate()); + #[cfg(not(unix))] + let terminate = || std::future::pending::<()>(); + + #[cfg(unix)] + let sighup = || wait_signal(signal::unix::SignalKind::hangup()); + #[cfg(not(unix))] + let sighup = || std::future::pending::<()>(); + + let ctrl_c = || async { + signal::ctrl_c().await.expect("failed to install Ctrl+C handler"); + }; + + let sig = loop { + tokio::select! { + () = sighup() => { + info!("Received reload request"); + + set_application_state(ApplicationState::Reloading); + + if let Some(tls_config) = tls_config.as_ref() { + if let Err(error) = reload_tls_config(tls_config).await { + error!(?error, "Failed to reload TLS config"); + } + } + + set_application_state(ApplicationState::Ready); + }, + () = terminate() => { break "SIGTERM"; }, + () = ctrl_c() => { break "Ctrl+C"; }, + } + }; + + warn!(signal = %sig, "Shutting down due to signal"); + + services().globals.shutdown(); + + for handle in handles { + handle.shutdown(Some(Duration::from_secs(30))); + } + + set_application_state(ApplicationState::Stopping); +} + +async fn federation_disabled(_: Uri) -> impl IntoResponse { + Error::bad_config("Federation is disabled.") +} + +async fn unauthenticated_media_disabled(_: Uri) -> impl IntoResponse { + Error::BadRequest( + ErrorKind::NotFound, + "Unauthenticated media access is disabled", + ) +} + +async fn not_found(method: Method, uri: Uri) -> impl IntoResponse { + debug!(%method, %uri, "Unknown route"); + Error::BadRequest(ErrorKind::Unrecognized, "Unrecognized request") +} + +async fn initial_sync(_uri: Uri) -> impl IntoResponse { + Error::BadRequest( + ErrorKind::GuestAccessForbidden, + "Guest access not implemented", + ) +} + +async fn it_works() -> &'static str { + "Hello from Grapevine!" 
+} + +trait RouterExt { + fn ruma_route(self, handler: H) -> Self + where + H: RumaHandler, + T: 'static; +} + +impl RouterExt for Router { + fn ruma_route(self, handler: H) -> Self + where + H: RumaHandler, + T: 'static, + { + handler.add_to_router(self) + } +} + +pub(crate) trait RumaHandler { + // Can't transform to a handler without boxing or relying on the + // nightly-only impl-trait-in-traits feature. Moving a small amount of + // extra logic into the trait allows bypassing both. + fn add_to_router(self, router: Router) -> Router; +} + +macro_rules! impl_ruma_handler { + ( $($ty:ident),* $(,)? ) => { + #[allow(non_snake_case)] + impl + RumaHandler<($($ty,)* Ar,)> for F + where + Req: IncomingRequest + Send + 'static, + Resp: IntoResponse, + F: FnOnce($($ty,)* Ar) -> Fut + Clone + Send + Sync + 'static, + Fut: Future> + + Send, + E: IntoResponse, + $( $ty: FromRequestParts<()> + Send + 'static, )* + { + fn add_to_router(self, mut router: Router) -> Router { + let meta = Req::METADATA; + let method_filter = method_to_filter(meta.method); + + for path in meta.history.all_paths() { + let handler = self.clone(); + + router = router.route( + path, + on( + method_filter, + |$( $ty: $ty, )* req: Ar| async move { + let span = info_span!( + "run_ruma_handler", + auth.user = ?req.sender_user, + auth.device = ?req.sender_device, + auth.servername = ?req.sender_servername, + auth.appservice_id = ?req.appservice_info + .as_ref() + .map(|i| &i.registration.id) + ); + handler($($ty,)* req).instrument(span).await + } + ) + ) + } + + router + } + } + }; +} + +impl_ruma_handler!(); +impl_ruma_handler!(T1); +impl_ruma_handler!(T1, T2); +impl_ruma_handler!(T1, T2, T3); +impl_ruma_handler!(T1, T2, T3, T4); +impl_ruma_handler!(T1, T2, T3, T4, T5); +impl_ruma_handler!(T1, T2, T3, T4, T5, T6); +impl_ruma_handler!(T1, T2, T3, T4, T5, T6, T7); +impl_ruma_handler!(T1, T2, T3, T4, T5, T6, T7, T8); + +fn method_to_filter(method: Method) -> MethodFilter { + match method { + Method::DELETE 
=> MethodFilter::DELETE, + Method::GET => MethodFilter::GET, + Method::HEAD => MethodFilter::HEAD, + Method::OPTIONS => MethodFilter::OPTIONS, + Method::PATCH => MethodFilter::PATCH, + Method::POST => MethodFilter::POST, + Method::PUT => MethodFilter::PUT, + Method::TRACE => MethodFilter::TRACE, + m => panic!("Unsupported HTTP method: {m:?}"), + } +} diff --git a/src/config.rs b/src/config.rs new file mode 100644 index 00000000..a485d4d3 --- /dev/null +++ b/src/config.rs @@ -0,0 +1,593 @@ +use std::{ + borrow::Cow, + collections::{BTreeMap, HashSet}, + fmt::{self, Display}, + net::{IpAddr, Ipv4Addr}, + path::{Path, PathBuf}, + sync::LazyLock, +}; + +use reqwest::Url; +use ruma::{ + api::federation::discovery::OldVerifyKey, OwnedServerName, + OwnedServerSigningKeyId, RoomVersionId, UInt, +}; +use serde::Deserialize; +use strum::{Display, EnumIter, IntoEnumIterator}; + +use crate::{error, utils::partial_canonicalize}; + +mod env_filter_clone; +mod proxy; + +pub(crate) use env_filter_clone::EnvFilterClone; +use proxy::ProxyConfig; + +/// The default configuration file path +pub(crate) static DEFAULT_PATH: LazyLock = + LazyLock::new(|| [env!("CARGO_PKG_NAME"), "config.toml"].iter().collect()); + +#[allow(clippy::struct_excessive_bools)] +#[derive(Debug, Deserialize)] +#[serde(deny_unknown_fields)] +pub(crate) struct Config { + #[serde(default = "false_fn")] + pub(crate) conduit_compat: bool, + #[serde(default = "default_listen")] + pub(crate) listen: Vec, + pub(crate) tls: Option, + + /// The name of this homeserver + /// + /// This is the value that will appear e.g. in user IDs and room aliases. 
+ pub(crate) server_name: OwnedServerName, + + pub(crate) server_discovery: ServerDiscovery, + pub(crate) database: DatabaseConfig, + pub(crate) media: MediaConfig, + #[serde(default)] + pub(crate) federation: FederationConfig, + #[serde(default)] + pub(crate) cache: CacheConfig, + #[serde(default = "default_cleanup_second_interval")] + pub(crate) cleanup_second_interval: u32, + #[serde(default = "default_max_request_size")] + pub(crate) max_request_size: UInt, + #[serde(default = "false_fn")] + pub(crate) allow_registration: bool, + pub(crate) registration_token: Option, + #[serde(default = "true_fn")] + pub(crate) allow_encryption: bool, + #[serde(default = "true_fn")] + pub(crate) allow_room_creation: bool, + #[serde(default = "default_default_room_version")] + pub(crate) default_room_version: RoomVersionId, + #[serde(default)] + pub(crate) proxy: ProxyConfig, + pub(crate) jwt_secret: Option, + #[serde(default)] + pub(crate) observability: ObservabilityConfig, + #[serde(default)] + pub(crate) turn: TurnConfig, + + pub(crate) emergency_password: Option, +} + +#[derive(Debug, Deserialize)] +#[serde(deny_unknown_fields)] +pub(crate) struct MediaConfig { + pub(crate) backend: MediaBackendConfig, + + #[serde(default)] + pub(crate) allow_unauthenticated_access: bool, +} + +#[derive(Debug, Deserialize)] +#[serde(deny_unknown_fields, tag = "type", rename_all = "snake_case")] +pub(crate) enum MediaBackendConfig { + Filesystem(MediaFilesystemConfig), +} + +#[derive(Debug, Deserialize)] +#[serde(deny_unknown_fields)] +pub(crate) struct MediaFilesystemConfig { + pub(crate) path: PathBuf, +} + +#[derive(Debug, Deserialize)] +#[serde(deny_unknown_fields, default)] +pub(crate) struct CacheConfig { + pub(crate) pdu: usize, + pub(crate) auth_chain: usize, + pub(crate) short_eventid: usize, + pub(crate) eventid_short: usize, + pub(crate) statekey_short: usize, + pub(crate) short_statekey: usize, + pub(crate) server_visibility: usize, + pub(crate) user_visibility: usize, + 
pub(crate) state_info: usize, + pub(crate) roomid_spacechunk: usize, +} + +impl Default for CacheConfig { + fn default() -> Self { + Self { + pdu: 150_000, + auth_chain: 100_000, + short_eventid: 100_000, + eventid_short: 100_000, + statekey_short: 100_000, + short_statekey: 100_000, + server_visibility: 100, + user_visibility: 100, + state_info: 100, + roomid_spacechunk: 200, + } + } +} + +#[derive(Debug, Deserialize)] +#[serde(deny_unknown_fields)] +pub(crate) struct ServerDiscovery { + /// Server-server discovery configuration + #[serde(default)] + pub(crate) server: ServerServerDiscovery, + + /// Client-server discovery configuration + pub(crate) client: ClientServerDiscovery, +} + +/// Server-server discovery configuration +#[derive(Debug, Default, Deserialize)] +#[serde(deny_unknown_fields)] +pub(crate) struct ServerServerDiscovery { + /// The alternative authority to make server-server API requests to + pub(crate) authority: Option, +} + +/// Client-server discovery configuration +#[derive(Debug, Deserialize)] +#[serde(deny_unknown_fields)] +pub(crate) struct ClientServerDiscovery { + /// The base URL to make client-server API requests to + pub(crate) base_url: Url, +} + +#[derive(Debug, Deserialize)] +#[serde(deny_unknown_fields)] +pub(crate) struct TlsConfig { + pub(crate) certs: String, + pub(crate) key: String, +} + +#[derive( + Debug, Clone, Copy, PartialEq, Eq, Hash, Deserialize, EnumIter, Display, +)] +#[serde(rename_all = "snake_case")] +#[strum(serialize_all = "snake_case")] +#[serde(deny_unknown_fields)] +pub(crate) enum ListenComponent { + Client, + Federation, + Metrics, + WellKnown, +} + +impl ListenComponent { + fn all_components() -> HashSet { + Self::iter().collect() + } +} + +#[derive(Clone, Debug, Deserialize)] +#[serde(tag = "type", rename_all = "snake_case")] +#[serde(deny_unknown_fields)] +pub(crate) enum ListenTransport { + Tcp { + #[serde(default = "default_address")] + address: IpAddr, + #[serde(default = "default_port")] + port: u16, 
+ #[serde(default = "false_fn")] + tls: bool, + #[serde(default = "false_fn")] + proxy_protocol: bool, + }, + Unix { + path: PathBuf, + #[serde(default = "false_fn")] + proxy_protocol: bool, + }, +} + +impl Display for ListenTransport { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ListenTransport::Tcp { + address, + port, + tls, + proxy_protocol, + } => { + let scheme = format!( + "{}{}", + if *proxy_protocol { + "proxy+" + } else { + "" + }, + if *tls { + "https" + } else { + "http" + } + ); + write!(f, "{scheme}://{address}:{port}") + } + ListenTransport::Unix { + path, + proxy_protocol, + } => { + write!( + f, + "{}http+unix://{}", + if *proxy_protocol { + "proxy+" + } else { + "" + }, + path.display() + ) + } + } + } +} + +#[derive(Clone, Debug, Deserialize)] +// Incompatible with deny_unknown_fields due to serde(flatten). +pub(crate) struct ListenConfig { + #[serde(default = "ListenComponent::all_components")] + pub(crate) components: HashSet, + #[serde(flatten)] + pub(crate) transport: ListenTransport, +} + +impl Display for ListenConfig { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "{} ({})", + self.transport, + self.components + .iter() + .map(ListenComponent::to_string) + .collect::>() + .join(", ") + ) + } +} + +#[derive(Copy, Clone, Default, Debug, Deserialize, clap::ValueEnum)] +#[serde(deny_unknown_fields)] +#[serde(rename_all = "snake_case")] +pub(crate) enum LogFormat { + /// Multiple lines per event, includes all information + Pretty, + + /// One line per event, includes most information + #[default] + Full, + + /// One line per event, includes less information + Compact, + + /// One JSON object per line per event, includes most information + Json, +} + +impl Display for LogFormat { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + LogFormat::Pretty => write!(f, "pretty"), + LogFormat::Full => write!(f, "full"), + LogFormat::Compact => write!(f, 
"compact"), + LogFormat::Json => write!(f, "json"), + } + } +} + +#[derive(Clone, Debug, Deserialize)] +#[serde(deny_unknown_fields)] +#[serde(default)] +pub(crate) struct TurnConfig { + pub(crate) username: String, + pub(crate) password: String, + pub(crate) uris: Vec, + pub(crate) secret: String, + pub(crate) ttl: u64, +} + +impl Default for TurnConfig { + fn default() -> Self { + Self { + username: String::new(), + password: String::new(), + uris: Vec::new(), + secret: String::new(), + ttl: 60 * 60 * 24, + } + } +} + +#[derive(Clone, Copy, Debug, Deserialize)] +#[serde(deny_unknown_fields)] +#[serde(rename_all = "lowercase")] +pub(crate) enum DatabaseBackend { + #[cfg(feature = "rocksdb")] + Rocksdb, + #[cfg(feature = "sqlite")] + Sqlite, +} + +impl Display for DatabaseBackend { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + #[cfg(feature = "rocksdb")] + DatabaseBackend::Rocksdb => write!(f, "RocksDB"), + #[cfg(feature = "sqlite")] + DatabaseBackend::Sqlite => write!(f, "SQLite"), + } + } +} + +#[derive(Clone, Debug, Deserialize)] +#[serde(deny_unknown_fields)] +pub(crate) struct DatabaseConfig { + pub(crate) backend: DatabaseBackend, + pub(crate) path: PathBuf, + #[serde(default = "default_db_cache_capacity_mb")] + pub(crate) cache_capacity_mb: f64, + #[cfg(feature = "rocksdb")] + #[serde(default = "default_rocksdb_max_open_files")] + pub(crate) rocksdb_max_open_files: i32, +} + +#[derive(Clone, Debug, Default, Deserialize)] +#[serde(deny_unknown_fields)] +#[serde(default)] +pub(crate) struct MetricsConfig { + pub(crate) enable: bool, +} + +#[derive(Debug, Deserialize)] +#[serde(deny_unknown_fields)] +#[serde(default)] +pub(crate) struct OtelTraceConfig { + pub(crate) enable: bool, + pub(crate) filter: EnvFilterClone, + pub(crate) endpoint: Option, + pub(crate) service_name: String, +} + +impl Default for OtelTraceConfig { + fn default() -> Self { + Self { + enable: false, + filter: default_tracing_filter(), + endpoint: None, + 
service_name: env!("CARGO_PKG_NAME").to_owned(), + } + } +} + +#[derive(Debug, Deserialize)] +#[serde(deny_unknown_fields)] +#[serde(default)] +pub(crate) struct FlameConfig { + pub(crate) enable: bool, + pub(crate) filter: EnvFilterClone, + pub(crate) filename: String, +} + +impl Default for FlameConfig { + fn default() -> Self { + Self { + enable: false, + filter: default_tracing_filter(), + filename: "./tracing.folded".to_owned(), + } + } +} + +#[derive(Debug, Deserialize)] +#[serde(deny_unknown_fields)] +#[serde(default)] +pub(crate) struct LogConfig { + pub(crate) filter: EnvFilterClone, + pub(crate) colors: bool, + pub(crate) format: LogFormat, + pub(crate) timestamp: bool, +} + +impl Default for LogConfig { + fn default() -> Self { + Self { + filter: default_tracing_filter(), + colors: true, + format: LogFormat::default(), + timestamp: true, + } + } +} + +#[derive(Debug, Default, Deserialize)] +#[serde(deny_unknown_fields)] +#[serde(default)] +pub(crate) struct ObservabilityConfig { + /// Prometheus metrics + pub(crate) metrics: MetricsConfig, + /// OpenTelemetry traces + pub(crate) traces: OtelTraceConfig, + /// Folded inferno stack traces + pub(crate) flame: FlameConfig, + /// Logging to stdout + pub(crate) logs: LogConfig, +} + +#[derive(Debug, Deserialize)] +#[serde(deny_unknown_fields)] +#[serde(default)] +pub(crate) struct FederationConfig { + pub(crate) enable: bool, + pub(crate) allow_invalid_tls_certificates: bool, + pub(crate) self_test: bool, + pub(crate) trusted_servers: Vec, + pub(crate) max_fetch_prev_events: u16, + pub(crate) max_concurrent_requests: u16, + pub(crate) old_verify_keys: BTreeMap, +} + +impl Default for FederationConfig { + fn default() -> Self { + Self { + enable: true, + self_test: true, + trusted_servers: vec![ + OwnedServerName::try_from("matrix.org").unwrap() + ], + max_fetch_prev_events: 100, + max_concurrent_requests: 100, + allow_invalid_tls_certificates: false, + old_verify_keys: BTreeMap::new(), + } + } +} + +fn 
false_fn() -> bool { + false +} + +fn true_fn() -> bool { + true +} + +fn default_listen() -> Vec { + vec![ListenConfig { + components: ListenComponent::all_components(), + transport: ListenTransport::Tcp { + address: default_address(), + port: default_port(), + tls: false, + proxy_protocol: false, + }, + }] +} + +fn default_address() -> IpAddr { + Ipv4Addr::LOCALHOST.into() +} + +fn default_port() -> u16 { + 6167 +} + +fn default_db_cache_capacity_mb() -> f64 { + 300.0 +} + +#[cfg(feature = "rocksdb")] +fn default_rocksdb_max_open_files() -> i32 { + 1000 +} + +fn default_cleanup_second_interval() -> u32 { + // every minute + 60 +} + +fn default_max_request_size() -> UInt { + // Default to 20 MB + (20_u32 * 1024 * 1024).into() +} + +pub(crate) fn default_tracing_filter() -> EnvFilterClone { + "info,ruma_state_res=warn" + .parse() + .expect("hardcoded env filter should be valid") +} + +// I know, it's a great name +pub(crate) fn default_default_room_version() -> RoomVersionId { + RoomVersionId::V10 +} + +/// Search default locations for a configuration file +/// +/// If one isn't found, the list of tried paths is returned. +fn search() -> Result { + use error::ConfigSearch as Error; + + xdg::BaseDirectories::new()? + .find_config_file(&*DEFAULT_PATH) + .ok_or(Error::NotFound) +} + +/// Load the configuration from the given path or XDG Base Directories +pub(crate) async fn load

( + path: Option

, + sandboxed: bool, +) -> Result +where + P: AsRef, +{ + use error::Config as Error; + + let path = match path.as_ref().map(AsRef::as_ref) { + Some(x) => Cow::Borrowed(x), + None => Cow::Owned(search()?), + }; + + let path = path.as_ref(); + + let config: Config = toml::from_str( + &tokio::fs::read_to_string(path) + .await + .map_err(|e| Error::Read(e, path.to_owned()))?, + ) + .map_err(|e| Error::Parse(e, path.to_owned()))?; + + if config.registration_token.as_deref() == Some("") { + return Err(Error::RegistrationTokenEmpty); + } + + match &config.media.backend { + MediaBackendConfig::Filesystem(x) => { + if overlap(&x.path, &config.database.path) { + return Err(Error::DatabaseMediaOverlap); + } + + if !sandboxed { + let media_path = partial_canonicalize(&x.path) + .await + .map_err(|e| Error::Canonicalize(e, x.path.clone()))?; + + let database_path = partial_canonicalize(&config.database.path) + .await + .map_err(|e| { + Error::Canonicalize(e, config.database.path.clone()) + })?; + + if overlap(&media_path, &database_path) { + return Err(Error::DatabaseMediaOverlap); + } + } + } + } + + Ok(config) +} + +/// Returns `true` if two paths overlap. +fn overlap(a: &Path, b: &Path) -> bool { + a.starts_with(b) || b.starts_with(a) +} diff --git a/src/config/env_filter_clone.rs b/src/config/env_filter_clone.rs new file mode 100644 index 00000000..c4325fc2 --- /dev/null +++ b/src/config/env_filter_clone.rs @@ -0,0 +1,57 @@ +//! A workaround for [`EnvFilter`] not directly implementing [`Clone`] +//! +//! This will be unnecessary after [tokio-rs/tracing#2956][0] is merged. +//! +//! 
[0]: https://github.com/tokio-rs/tracing/pull/2956 +#![warn(missing_docs, clippy::missing_docs_in_private_items)] + +use std::{fmt, str::FromStr}; + +use serde::{de, Deserialize, Deserializer}; +use tracing_subscriber::EnvFilter; + +/// A workaround for [`EnvFilter`] not directly implementing [`Clone`] +/// +/// Use [`FromStr`] or [`Deserialize`] to construct this type, then [`From`] or +/// [`Into`] to convert it into an [`EnvFilter`] when needed. +#[derive(Debug, Clone)] +pub(crate) struct EnvFilterClone(pub(crate) String); + +impl FromStr for EnvFilterClone { + type Err = ::Err; + + fn from_str(s: &str) -> Result { + EnvFilter::from_str(s)?; + Ok(Self(s.to_owned())) + } +} + +impl fmt::Display for EnvFilterClone { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl From<&EnvFilterClone> for EnvFilter { + fn from(other: &EnvFilterClone) -> Self { + EnvFilter::from_str(&other.0) + .expect("env filter syntax should have been validated already") + } +} + +impl From for EnvFilter { + fn from(other: EnvFilterClone) -> Self { + EnvFilter::from_str(&other.0) + .expect("env filter syntax should have been validated already") + } +} + +impl<'de> Deserialize<'de> for EnvFilterClone { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let s = String::deserialize(deserializer)?; + Self::from_str(&s).map_err(de::Error::custom) + } +} diff --git a/src/config/mod.rs b/src/config/mod.rs deleted file mode 100644 index fb1e2f31..00000000 --- a/src/config/mod.rs +++ /dev/null @@ -1,272 +0,0 @@ -use std::{ - collections::BTreeMap, - fmt, - net::{IpAddr, Ipv4Addr}, -}; - -use ruma::{OwnedServerName, RoomVersionId}; -use serde::{de::IgnoredAny, Deserialize}; -use tracing::warn; - -mod proxy; - -use self::proxy::ProxyConfig; - -#[derive(Clone, Debug, Deserialize)] -pub struct Config { - #[serde(default = "default_address")] - pub address: IpAddr, - #[serde(default = "default_port")] - pub port: u16, - pub 
tls: Option, - - pub server_name: OwnedServerName, - pub database_backend: String, - pub database_path: String, - #[serde(default = "default_db_cache_capacity_mb")] - pub db_cache_capacity_mb: f64, - #[serde(default = "true_fn")] - pub enable_lightning_bolt: bool, - #[serde(default = "true_fn")] - pub allow_check_for_updates: bool, - #[serde(default = "default_conduit_cache_capacity_modifier")] - pub conduit_cache_capacity_modifier: f64, - #[serde(default = "default_rocksdb_max_open_files")] - pub rocksdb_max_open_files: i32, - #[serde(default = "default_pdu_cache_capacity")] - pub pdu_cache_capacity: u32, - #[serde(default = "default_cleanup_second_interval")] - pub cleanup_second_interval: u32, - #[serde(default = "default_max_request_size")] - pub max_request_size: u32, - #[serde(default = "default_max_concurrent_requests")] - pub max_concurrent_requests: u16, - #[serde(default = "default_max_fetch_prev_events")] - pub max_fetch_prev_events: u16, - #[serde(default = "false_fn")] - pub allow_registration: bool, - pub registration_token: Option, - #[serde(default = "true_fn")] - pub allow_encryption: bool, - #[serde(default = "false_fn")] - pub allow_federation: bool, - #[serde(default = "true_fn")] - pub allow_room_creation: bool, - #[serde(default = "true_fn")] - pub allow_unstable_room_versions: bool, - #[serde(default = "default_default_room_version")] - pub default_room_version: RoomVersionId, - pub well_known_client: Option, - #[serde(default = "false_fn")] - pub allow_jaeger: bool, - #[serde(default = "false_fn")] - pub tracing_flame: bool, - #[serde(default)] - pub proxy: ProxyConfig, - pub jwt_secret: Option, - #[serde(default = "default_trusted_servers")] - pub trusted_servers: Vec, - #[serde(default = "default_log")] - pub log: String, - #[serde(default)] - pub turn_username: String, - #[serde(default)] - pub turn_password: String, - #[serde(default = "Vec::new")] - pub turn_uris: Vec, - #[serde(default)] - pub turn_secret: String, - #[serde(default = 
"default_turn_ttl")] - pub turn_ttl: u64, - - pub emergency_password: Option, - - #[serde(flatten)] - pub catchall: BTreeMap, -} - -#[derive(Clone, Debug, Deserialize)] -pub struct TlsConfig { - pub certs: String, - pub key: String, -} - -const DEPRECATED_KEYS: &[&str] = &["cache_capacity"]; - -impl Config { - pub fn warn_deprecated(&self) { - let mut was_deprecated = false; - for key in self - .catchall - .keys() - .filter(|key| DEPRECATED_KEYS.iter().any(|s| s == key)) - { - warn!("Config parameter {} is deprecated", key); - was_deprecated = true; - } - - if was_deprecated { - warn!("Read conduit documentation and check your configuration if any new configuration parameters should be adjusted"); - } - } -} - -impl fmt::Display for Config { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // Prepare a list of config values to show - let lines = [ - ("Server name", self.server_name.host()), - ("Database backend", &self.database_backend), - ("Database path", &self.database_path), - ( - "Database cache capacity (MB)", - &self.db_cache_capacity_mb.to_string(), - ), - ( - "Cache capacity modifier", - &self.conduit_cache_capacity_modifier.to_string(), - ), - #[cfg(feature = "rocksdb")] - ( - "Maximum open files for RocksDB", - &self.rocksdb_max_open_files.to_string(), - ), - ("PDU cache capacity", &self.pdu_cache_capacity.to_string()), - ( - "Cleanup interval in seconds", - &self.cleanup_second_interval.to_string(), - ), - ("Maximum request size", &self.max_request_size.to_string()), - ( - "Maximum concurrent requests", - &self.max_concurrent_requests.to_string(), - ), - ("Allow registration", &self.allow_registration.to_string()), - ( - "Enabled lightning bolt", - &self.enable_lightning_bolt.to_string(), - ), - ("Allow encryption", &self.allow_encryption.to_string()), - ("Allow federation", &self.allow_federation.to_string()), - ("Allow room creation", &self.allow_room_creation.to_string()), - ( - "JWT secret", - match self.jwt_secret { - Some(_) => 
"set", - None => "not set", - }, - ), - ("Trusted servers", { - let mut lst = vec![]; - for server in &self.trusted_servers { - lst.push(server.host()); - } - &lst.join(", ") - }), - ( - "TURN username", - if self.turn_username.is_empty() { - "not set" - } else { - &self.turn_username - }, - ), - ("TURN password", { - if self.turn_password.is_empty() { - "not set" - } else { - "set" - } - }), - ("TURN secret", { - if self.turn_secret.is_empty() { - "not set" - } else { - "set" - } - }), - ("Turn TTL", &self.turn_ttl.to_string()), - ("Turn URIs", { - let mut lst = vec![]; - for item in self.turn_uris.iter().cloned().enumerate() { - let (_, uri): (usize, String) = item; - lst.push(uri); - } - &lst.join(", ") - }), - ]; - - let mut msg: String = "Active config values:\n\n".to_owned(); - - for line in lines.into_iter().enumerate() { - msg += &format!("{}: {}\n", line.1 .0, line.1 .1); - } - - write!(f, "{msg}") - } -} - -fn false_fn() -> bool { - false -} - -fn true_fn() -> bool { - true -} - -fn default_address() -> IpAddr { - Ipv4Addr::LOCALHOST.into() -} - -fn default_port() -> u16 { - 8000 -} - -fn default_db_cache_capacity_mb() -> f64 { - 300.0 -} - -fn default_conduit_cache_capacity_modifier() -> f64 { - 1.0 -} - -fn default_rocksdb_max_open_files() -> i32 { - 1000 -} - -fn default_pdu_cache_capacity() -> u32 { - 150_000 -} - -fn default_cleanup_second_interval() -> u32 { - 60 // every minute -} - -fn default_max_request_size() -> u32 { - 20 * 1024 * 1024 // Default to 20 MB -} - -fn default_max_concurrent_requests() -> u16 { - 100 -} - -fn default_max_fetch_prev_events() -> u16 { - 100_u16 -} - -fn default_trusted_servers() -> Vec { - vec![OwnedServerName::try_from("matrix.org").unwrap()] -} - -fn default_log() -> String { - "warn,state_res=warn,_=off".to_owned() -} - -fn default_turn_ttl() -> u64 { - 60 * 60 * 24 -} - -// I know, it's a great name -pub fn default_default_room_version() -> RoomVersionId { - RoomVersionId::V10 -} diff --git a/src/config/proxy.rs 
b/src/config/proxy.rs index c03463e7..af6df291 100644 --- a/src/config/proxy.rs +++ b/src/config/proxy.rs @@ -10,13 +10,12 @@ use crate::Result; /// ``` /// - Global proxy /// ```toml -/// [global.proxy] +/// [proxy] /// global = { url = "socks5h://localhost:9050" } /// ``` /// - Proxy some domains /// ```toml -/// [global.proxy] -/// [[global.proxy.by_domain]] +/// [[proxy.by_domain]] /// url = "socks5h://localhost:9050" /// include = ["*.onion", "matrix.myspecial.onion"] /// exclude = ["*.myspecial.onion"] @@ -24,13 +23,14 @@ use crate::Result; /// ## Include vs. Exclude /// If include is an empty list, it is assumed to be `["*"]`. /// -/// If a domain matches both the exclude and include list, the proxy will only be used if it was -/// included because of a more specific rule than it was excluded. In the above example, the proxy -/// would be used for `ordinary.onion`, `matrix.myspecial.onion`, but not `hello.myspecial.onion`. +/// If a domain matches both the exclude and include list, the proxy will only +/// be used if it was included because of a more specific rule than it was +/// excluded. In the above example, the proxy would be used for +/// `ordinary.onion`, `matrix.myspecial.onion`, but not `hello.myspecial.onion`. 
#[derive(Clone, Debug, Deserialize)] #[serde(rename_all = "snake_case")] #[derive(Default)] -pub enum ProxyConfig { +pub(crate) enum ProxyConfig { #[default] None, Global { @@ -40,19 +40,22 @@ pub enum ProxyConfig { ByDomain(Vec), } impl ProxyConfig { - pub fn to_proxy(&self) -> Result> { + pub(crate) fn to_proxy(&self) -> Result> { Ok(match self.clone() { ProxyConfig::None => None, - ProxyConfig::Global { url } => Some(Proxy::all(url)?), + ProxyConfig::Global { + url, + } => Some(Proxy::all(url)?), ProxyConfig::ByDomain(proxies) => Some(Proxy::custom(move |url| { - proxies.iter().find_map(|proxy| proxy.for_url(url)).cloned() // first matching proxy + // first matching proxy + proxies.iter().find_map(|proxy| proxy.for_url(url)).cloned() })), }) } } #[derive(Clone, Debug, Deserialize)] -pub struct PartialProxyConfig { +pub(crate) struct PartialProxyConfig { #[serde(deserialize_with = "crate::utils::deserialize_from_str")] url: Url, #[serde(default)] @@ -61,13 +64,15 @@ pub struct PartialProxyConfig { exclude: Vec, } impl PartialProxyConfig { - pub fn for_url(&self, url: &Url) -> Option<&Url> { + pub(crate) fn for_url(&self, url: &Url) -> Option<&Url> { let domain = url.domain()?; - let mut included_because = None; // most specific reason it was included - let mut excluded_because = None; // most specific reason it was excluded + // most specific reason it was included + let mut included_because = None; + // most specific reason it was excluded + let mut excluded_because = None; if self.include.is_empty() { // treat empty include list as `*` - included_because = Some(&WildCardedDomain::WildCard) + included_because = Some(&WildCardedDomain::WildCard); } for wc_domain in &self.include { if wc_domain.matches(domain) { @@ -86,7 +91,8 @@ impl PartialProxyConfig { } } match (included_because, excluded_because) { - (Some(a), Some(b)) if a.more_specific_than(b) => Some(&self.url), // included for a more specific reason than excluded + // included for a more specific reason 
than excluded + (Some(a), Some(b)) if a.more_specific_than(b) => Some(&self.url), (Some(_), None) => Some(&self.url), _ => None, } @@ -95,42 +101,46 @@ impl PartialProxyConfig { /// A domain name, that optionally allows a * as its first subdomain. #[derive(Clone, Debug)] -pub enum WildCardedDomain { +pub(crate) enum WildCardedDomain { WildCard, WildCarded(String), Exact(String), } impl WildCardedDomain { - pub fn matches(&self, domain: &str) -> bool { + pub(crate) fn matches(&self, domain: &str) -> bool { match self { WildCardedDomain::WildCard => true, WildCardedDomain::WildCarded(d) => domain.ends_with(d), WildCardedDomain::Exact(d) => domain == d, } } - pub fn more_specific_than(&self, other: &Self) -> bool { + + pub(crate) fn more_specific_than(&self, other: &Self) -> bool { match (self, other) { (WildCardedDomain::WildCard, WildCardedDomain::WildCard) => false, (_, WildCardedDomain::WildCard) => true, - (WildCardedDomain::Exact(a), WildCardedDomain::WildCarded(_)) => other.matches(a), - (WildCardedDomain::WildCarded(a), WildCardedDomain::WildCarded(b)) => { - a != b && a.ends_with(b) + (WildCardedDomain::Exact(a), WildCardedDomain::WildCarded(_)) => { + other.matches(a) } + ( + WildCardedDomain::WildCarded(a), + WildCardedDomain::WildCarded(b), + ) => a != b && a.ends_with(b), _ => false, } } } impl std::str::FromStr for WildCardedDomain { type Err = std::convert::Infallible; + fn from_str(s: &str) -> Result { // maybe do some domain validation? 
- Ok(if s.starts_with("*.") { - WildCardedDomain::WildCarded(s[1..].to_owned()) - } else if s == "*" { - WildCardedDomain::WildCarded("".to_owned()) - } else { - WildCardedDomain::Exact(s.to_owned()) - }) + Ok(s.strip_prefix("*.") + .map(|x| WildCardedDomain::WildCarded(x.to_owned())) + .or_else(|| { + (s == "*").then(|| WildCardedDomain::WildCarded(String::new())) + }) + .unwrap_or_else(|| WildCardedDomain::Exact(s.to_owned()))) } } impl<'de> Deserialize<'de> for WildCardedDomain { diff --git a/src/database.rs b/src/database.rs new file mode 100644 index 00000000..16a109e7 --- /dev/null +++ b/src/database.rs @@ -0,0 +1,1062 @@ +use std::{ + collections::{HashMap, HashSet}, + fs, + io::Write, + mem::size_of, + sync::Arc, +}; + +use ruma::{ + events::push_rules::PushRulesEventContent, push::Ruleset, serde::Raw, + EventId, OwnedRoomId, RoomId, UserId, +}; +use tracing::{debug, error, info, info_span, warn, Instrument}; + +use crate::{ + config::DatabaseBackend, + service::{ + media::MediaFileKey, + rooms::{ + short::{ShortEventId, ShortStateHash, ShortStateKey}, + state_compressor::CompressedStateEvent, + }, + }, + services, utils, Config, Error, Result, +}; + +pub(crate) mod abstraction; +pub(crate) mod key_value; + +use abstraction::{KeyValueDatabaseEngine, KvTree}; + +pub(crate) struct KeyValueDatabase { + db: Arc, + + // Trees "owned" by `self::key_value::globals` + pub(super) global: Arc, + pub(super) server_signingkeys: Arc, + + // Trees "owned" by `self::key_value::users` + pub(super) userid_password: Arc, + pub(super) userid_displayname: Arc, + pub(super) userid_avatarurl: Arc, + pub(super) userid_blurhash: Arc, + pub(super) userdeviceid_token: Arc, + + // This is also used to check if a device exists + pub(super) userdeviceid_metadata: Arc, + + // DevicelistVersion = u64 + pub(super) userid_devicelistversion: Arc, + pub(super) token_userdeviceid: Arc, + + // OneTimeKeyId = UserId + DeviceKeyId + pub(super) onetimekeyid_onetimekeys: Arc, + + // 
LastOneTimeKeyUpdate = Count + pub(super) userid_lastonetimekeyupdate: Arc, + + // KeyChangeId = UserId/RoomId + Count + pub(super) keychangeid_userid: Arc, + + // KeyId = UserId + KeyId (depends on key type) + pub(super) keyid_key: Arc, + pub(super) userid_masterkeyid: Arc, + pub(super) userid_selfsigningkeyid: Arc, + pub(super) userid_usersigningkeyid: Arc, + + // UserFilterId = UserId + FilterId + pub(super) userfilterid_filter: Arc, + + // ToDeviceId = UserId + DeviceId + Count + pub(super) todeviceid_events: Arc, + + // Trees "owned" by `self::key_value::uiaa` + // User-interactive authentication + pub(super) userdevicesessionid_uiaainfo: Arc, + + // Trees "owned" by `self::key_value::rooms::edus` + // ReadReceiptId = RoomId + Count + UserId + pub(super) readreceiptid_readreceipt: Arc, + + // RoomUserId = Room + User, PrivateRead = Count + pub(super) roomuserid_privateread: Arc, + + // LastPrivateReadUpdate = Count + pub(super) roomuserid_lastprivatereadupdate: Arc, + + // PresenceId = RoomId + Count + UserId + // This exists in the database already but is currently unused + #[allow(dead_code)] + pub(super) presenceid_presence: Arc, + + // LastPresenceUpdate = Count + // This exists in the database already but is currently unused + #[allow(dead_code)] + pub(super) userid_lastpresenceupdate: Arc, + + // Trees "owned" by `self::key_value::rooms` + // PduId = ShortRoomId + Count + pub(super) pduid_pdu: Arc, + pub(super) eventid_pduid: Arc, + pub(super) roomid_pduleaves: Arc, + pub(super) alias_roomid: Arc, + + // AliasId = RoomId + Count + pub(super) aliasid_alias: Arc, + pub(super) publicroomids: Arc, + + // ThreadId = RoomId + Count + pub(super) threadid_userids: Arc, + + // TokenId = ShortRoomId + Token + PduIdCount + pub(super) tokenids: Arc, + + /// Participating servers in a room. 
+ // RoomServerId = RoomId + ServerName + pub(super) roomserverids: Arc, + + // ServerRoomId = ServerName + RoomId + pub(super) serverroomids: Arc, + + pub(super) userroomid_joined: Arc, + pub(super) roomuserid_joined: Arc, + pub(super) roomid_joinedcount: Arc, + pub(super) roomid_invitedcount: Arc, + pub(super) roomuseroncejoinedids: Arc, + + // InviteState = Vec> + pub(super) userroomid_invitestate: Arc, + + // InviteCount = Count + pub(super) roomuserid_invitecount: Arc, + pub(super) userroomid_leftstate: Arc, + pub(super) roomuserid_leftcount: Arc, + + // Rooms where incoming federation handling is disabled + pub(super) disabledroomids: Arc, + + // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId + pub(super) lazyloadedids: Arc, + + // NotifyCount = u64 + pub(super) userroomid_notificationcount: Arc, + + // HightlightCount = u64 + pub(super) userroomid_highlightcount: Arc, + + // LastNotificationRead = u64 + pub(super) roomuserid_lastnotificationread: Arc, + + /// Remember the current state hash of a room. + pub(super) roomid_shortstatehash: Arc, + + pub(super) roomsynctoken_shortstatehash: Arc, + + /// Remember the state hash at events in the past. + pub(super) shorteventid_shortstatehash: Arc, + + /// `StateKey = EventType + StateKey`, `ShortStateKey = Count` + pub(super) statekey_shortstatekey: Arc, + pub(super) shortstatekey_statekey: Arc, + + pub(super) roomid_shortroomid: Arc, + + pub(super) shorteventid_eventid: Arc, + pub(super) eventid_shorteventid: Arc, + + pub(super) statehash_shortstatehash: Arc, + + // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + + // (shortstatekey+shorteventid--) + pub(super) shortstatehash_statediff: Arc, + + pub(super) shorteventid_authchain: Arc, + + /// `RoomId + EventId -> outlier PDU` + /// + /// Any pdu that has passed the steps 1-8 in the incoming event + /// /federation/send/txn. 
+ pub(super) eventid_outlierpdu: Arc, + pub(super) softfailedeventids: Arc, + + /// `ShortEventId + ShortEventId -> ()` + pub(super) tofrom_relation: Arc, + + /// `RoomId + EventId -> Parent PDU EventId` + pub(super) referencedevents: Arc, + + // Trees "owned" by `self::key_value::account_data` + // RoomUserDataId = Room + User + Count + Type + pub(super) roomuserdataid_accountdata: Arc, + + // RoomUserType = Room + User + Type + pub(super) roomusertype_roomuserdataid: Arc, + + // Trees "owned" by `self::key_value::media` + // MediaId = MXC + WidthHeight + ContentDisposition + ContentType + pub(super) mediaid_file: Arc, + + // Trees "owned" by `self::key_value::key_backups` + // BackupId = UserId + Version(Count) + pub(super) backupid_algorithm: Arc, + + // BackupId = UserId + Version(Count) + pub(super) backupid_etag: Arc, + + // BackupKeyId = UserId + Version + RoomId + SessionId + pub(super) backupkeyid_backup: Arc, + + // Trees "owned" by `self::key_value::transaction_ids` + // Response can be empty (/sendToDevice) or the event id (/send) + pub(super) userdevicetxnid_response: Arc, + + // Trees "owned" by `self::key_value::sending` + // EduCount: Count of last EDU sync + pub(super) servername_educount: Arc, + + // ServernameEvent = (+ / $)SenderKey / ServerName / UserId + PduId / Id + // (for edus), Data = EDU content + pub(super) servernameevent_data: Arc, + + // ServerCurrentEvents = (+ / $)ServerName / UserId + PduId / Id (for + // edus), Data = EDU content + pub(super) servercurrentevent_data: Arc, + + // Trees "owned" by `self::key_value::appservice` + pub(super) id_appserviceregistrations: Arc, + + // Trees "owned" by `self::key_value::pusher` + pub(super) senderkey_pusher: Arc, +} + +impl KeyValueDatabase { + fn check_db_setup(config: &Config) -> Result<()> { + let sqlite_exists = config + .database + .path + .join(format!( + "{}.db", + if config.conduit_compat { + "conduit" + } else { + "grapevine" + } + )) + .exists(); + let rocksdb_exists = 
config.database.path.join("IDENTITY").exists(); + + let mut count = 0; + + if sqlite_exists { + count += 1; + } + + if rocksdb_exists { + count += 1; + } + + if count > 1 { + warn!("Multiple databases at database_path detected"); + return Ok(()); + } + + let (backend_is_rocksdb, backend_is_sqlite): (bool, bool) = + match config.database.backend { + #[cfg(feature = "rocksdb")] + DatabaseBackend::Rocksdb => (true, false), + #[cfg(feature = "sqlite")] + DatabaseBackend::Sqlite => (false, true), + }; + + if sqlite_exists && !backend_is_sqlite { + return Err(Error::bad_config( + "Found sqlite at database_path, but is not specified in \ + config.", + )); + } + + if rocksdb_exists && !backend_is_rocksdb { + return Err(Error::bad_config( + "Found rocksdb at database_path, but is not specified in \ + config.", + )); + } + + Ok(()) + } + + /// Load an existing database or create a new one, and initialize all + /// services with the loaded database. + #[cfg_attr( + not(any(feature = "rocksdb", feature = "sqlite")), + allow(unreachable_code) + )] + #[allow(clippy::too_many_lines)] + pub(crate) fn load_or_create(config: &Config) -> Result { + Self::check_db_setup(config)?; + + if !config.database.path.exists() { + fs::create_dir_all(&config.database.path).map_err(|_| { + Error::BadConfig( + "Database folder doesn't exists and couldn't be created \ + (e.g. due to missing permissions). Please create the \ + database folder yourself.", + ) + })?; + } + + #[cfg_attr( + not(any(feature = "rocksdb", feature = "sqlite")), + allow(unused_variables) + )] + let builder: Arc = + match config.database.backend { + #[cfg(feature = "sqlite")] + DatabaseBackend::Sqlite => { + Arc::new(Arc::::open(config)?) + } + #[cfg(feature = "rocksdb")] + DatabaseBackend::Rocksdb => { + Arc::new(Arc::::open(config)?) 
+ } + }; + + let db = Self { + db: builder.clone(), + userid_password: builder.open_tree("userid_password")?, + userid_displayname: builder.open_tree("userid_displayname")?, + userid_avatarurl: builder.open_tree("userid_avatarurl")?, + userid_blurhash: builder.open_tree("userid_blurhash")?, + userdeviceid_token: builder.open_tree("userdeviceid_token")?, + userdeviceid_metadata: builder + .open_tree("userdeviceid_metadata")?, + userid_devicelistversion: builder + .open_tree("userid_devicelistversion")?, + token_userdeviceid: builder.open_tree("token_userdeviceid")?, + onetimekeyid_onetimekeys: builder + .open_tree("onetimekeyid_onetimekeys")?, + userid_lastonetimekeyupdate: builder + .open_tree("userid_lastonetimekeyupdate")?, + keychangeid_userid: builder.open_tree("keychangeid_userid")?, + keyid_key: builder.open_tree("keyid_key")?, + userid_masterkeyid: builder.open_tree("userid_masterkeyid")?, + userid_selfsigningkeyid: builder + .open_tree("userid_selfsigningkeyid")?, + userid_usersigningkeyid: builder + .open_tree("userid_usersigningkeyid")?, + userfilterid_filter: builder.open_tree("userfilterid_filter")?, + todeviceid_events: builder.open_tree("todeviceid_events")?, + + userdevicesessionid_uiaainfo: builder + .open_tree("userdevicesessionid_uiaainfo")?, + readreceiptid_readreceipt: builder + .open_tree("readreceiptid_readreceipt")?, + // "Private" read receipt + roomuserid_privateread: builder + .open_tree("roomuserid_privateread")?, + roomuserid_lastprivatereadupdate: builder + .open_tree("roomuserid_lastprivatereadupdate")?, + presenceid_presence: builder.open_tree("presenceid_presence")?, + userid_lastpresenceupdate: builder + .open_tree("userid_lastpresenceupdate")?, + pduid_pdu: builder.open_tree("pduid_pdu")?, + eventid_pduid: builder.open_tree("eventid_pduid")?, + roomid_pduleaves: builder.open_tree("roomid_pduleaves")?, + + alias_roomid: builder.open_tree("alias_roomid")?, + aliasid_alias: builder.open_tree("aliasid_alias")?, + publicroomids: 
builder.open_tree("publicroomids")?, + + threadid_userids: builder.open_tree("threadid_userids")?, + + tokenids: builder.open_tree("tokenids")?, + + roomserverids: builder.open_tree("roomserverids")?, + serverroomids: builder.open_tree("serverroomids")?, + userroomid_joined: builder.open_tree("userroomid_joined")?, + roomuserid_joined: builder.open_tree("roomuserid_joined")?, + roomid_joinedcount: builder.open_tree("roomid_joinedcount")?, + roomid_invitedcount: builder.open_tree("roomid_invitedcount")?, + roomuseroncejoinedids: builder + .open_tree("roomuseroncejoinedids")?, + userroomid_invitestate: builder + .open_tree("userroomid_invitestate")?, + roomuserid_invitecount: builder + .open_tree("roomuserid_invitecount")?, + userroomid_leftstate: builder.open_tree("userroomid_leftstate")?, + roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?, + + disabledroomids: builder.open_tree("disabledroomids")?, + + lazyloadedids: builder.open_tree("lazyloadedids")?, + + userroomid_notificationcount: builder + .open_tree("userroomid_notificationcount")?, + userroomid_highlightcount: builder + .open_tree("userroomid_highlightcount")?, + roomuserid_lastnotificationread: builder + .open_tree("userroomid_highlightcount")?, + + statekey_shortstatekey: builder + .open_tree("statekey_shortstatekey")?, + shortstatekey_statekey: builder + .open_tree("shortstatekey_statekey")?, + + shorteventid_authchain: builder + .open_tree("shorteventid_authchain")?, + + roomid_shortroomid: builder.open_tree("roomid_shortroomid")?, + + shortstatehash_statediff: builder + .open_tree("shortstatehash_statediff")?, + eventid_shorteventid: builder.open_tree("eventid_shorteventid")?, + shorteventid_eventid: builder.open_tree("shorteventid_eventid")?, + shorteventid_shortstatehash: builder + .open_tree("shorteventid_shortstatehash")?, + roomid_shortstatehash: builder + .open_tree("roomid_shortstatehash")?, + roomsynctoken_shortstatehash: builder + .open_tree("roomsynctoken_shortstatehash")?, + 
statehash_shortstatehash: builder + .open_tree("statehash_shortstatehash")?, + + eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?, + softfailedeventids: builder.open_tree("softfailedeventids")?, + + tofrom_relation: builder.open_tree("tofrom_relation")?, + referencedevents: builder.open_tree("referencedevents")?, + roomuserdataid_accountdata: builder + .open_tree("roomuserdataid_accountdata")?, + roomusertype_roomuserdataid: builder + .open_tree("roomusertype_roomuserdataid")?, + mediaid_file: builder.open_tree("mediaid_file")?, + backupid_algorithm: builder.open_tree("backupid_algorithm")?, + backupid_etag: builder.open_tree("backupid_etag")?, + backupkeyid_backup: builder.open_tree("backupkeyid_backup")?, + userdevicetxnid_response: builder + .open_tree("userdevicetxnid_response")?, + servername_educount: builder.open_tree("servername_educount")?, + servernameevent_data: builder.open_tree("servernameevent_data")?, + servercurrentevent_data: builder + .open_tree("servercurrentevent_data")?, + id_appserviceregistrations: builder + .open_tree("id_appserviceregistrations")?, + senderkey_pusher: builder.open_tree("senderkey_pusher")?, + global: builder.open_tree("global")?, + server_signingkeys: builder.open_tree("server_signingkeys")?, + }; + + Ok(db) + } + + /// Ensure that the database is at the current version, applying migrations + /// if necessary. + /// + /// If it is not possible to migrate the database to the current version, + /// returns an error. + #[allow(clippy::too_many_lines)] + pub(crate) async fn apply_migrations(&self) -> Result<()> { + // If the database has any data, perform data migrations before starting + let latest_database_version = 13; + + if services().users.count()? 
> 0 { + // MIGRATIONS + migration(1, || { + for (roomserverid, _) in self.roomserverids.iter() { + let mut parts = roomserverid.split(|&b| b == 0xFF); + let room_id = + parts.next().expect("split always returns one element"); + let Some(servername) = parts.next() else { + error!("Migration: Invalid roomserverid in db."); + continue; + }; + let mut serverroomid = servername.to_vec(); + serverroomid.push(0xFF); + serverroomid.extend_from_slice(room_id); + + self.serverroomids.insert(&serverroomid, &[])?; + } + Ok(()) + })?; + + migration(2, || { + // We accidentally inserted hashed versions of "" into the db + // instead of just "" + for (userid, password) in self.userid_password.iter() { + let password = utils::string_from_bytes(&password); + + let empty_hashed_password = + password.is_ok_and(|password| { + utils::verify_password("", password) + }); + + if empty_hashed_password { + self.userid_password.insert(&userid, b"")?; + } + } + Ok(()) + })?; + + migration(3, || { + // Move media to filesystem + for (key, content) in self.mediaid_file.iter() { + let key = MediaFileKey::new(key); + if content.is_empty() { + continue; + } + + let path = services().globals.get_media_file(&key); + let mut file = fs::File::create(path)?; + file.write_all(&content)?; + self.mediaid_file.insert(key.as_bytes(), &[])?; + } + Ok(()) + })?; + + migration(4, || { + // Add federated users to services() as deactivated + for our_user in services().users.iter() { + let our_user = our_user?; + if services().users.is_deactivated(&our_user)? { + continue; + } + for room in + services().rooms.state_cache.rooms_joined(&our_user) + { + for user in + services().rooms.state_cache.room_members(&room?) 
+ { + let user = user?; + if user.server_name() + != services().globals.server_name() + { + info!(?user, "Migration: creating user"); + services().users.create(&user, None)?; + } + } + } + } + Ok(()) + })?; + + migration(5, || { + // Upgrade user data store + for (roomuserdataid, _) in + self.roomuserdataid_accountdata.iter() + { + let mut parts = roomuserdataid.split(|&b| b == 0xFF); + let room_id = parts.next().unwrap(); + let user_id = parts.next().unwrap(); + let event_type = + roomuserdataid.rsplit(|&b| b == 0xFF).next().unwrap(); + + let mut key = room_id.to_vec(); + key.push(0xFF); + key.extend_from_slice(user_id); + key.push(0xFF); + key.extend_from_slice(event_type); + + self.roomusertype_roomuserdataid + .insert(&key, &roomuserdataid)?; + } + Ok(()) + })?; + + migration(6, || { + // Set room member count + for (roomid, _) in self.roomid_shortstatehash.iter() { + let string = utils::string_from_bytes(&roomid).unwrap(); + let room_id = <&RoomId>::try_from(string.as_str()).unwrap(); + services() + .rooms + .state_cache + .update_joined_count(room_id)?; + } + Ok(()) + })?; + + migration(7, || { + // Upgrade state store + let mut last_roomstates: HashMap = + HashMap::new(); + let mut current_sstatehash: Option = None; + let mut current_room = None; + let mut current_state = HashSet::new(); + let mut counter = 0; + + let mut handle_state = + |current_sstatehash: ShortStateHash, + current_room: &RoomId, + current_state: HashSet<_>, + last_roomstates: &mut HashMap<_, _>| { + counter += 1; + let last_roomsstatehash = + last_roomstates.get(current_room); + + let states_parents = last_roomsstatehash.map_or_else( + || Ok(Vec::new()), + |&last_roomsstatehash| { + services() + .rooms + .state_compressor + .load_shortstatehash_info( + last_roomsstatehash, + ) + }, + )?; + + let (statediffnew, statediffremoved) = + if let Some(parent_stateinfo) = + states_parents.last() + { + let statediffnew = current_state + .difference(&parent_stateinfo.full_state) + .copied() + 
.collect::>(); + + let statediffremoved = parent_stateinfo + .full_state + .difference(¤t_state) + .copied() + .collect::>(); + + (statediffnew, statediffremoved) + } else { + (current_state, HashSet::new()) + }; + + services() + .rooms + .state_compressor + .save_state_from_diff( + current_sstatehash, + Arc::new(statediffnew), + Arc::new(statediffremoved), + // every state change is 2 event changes on + // average + 2, + states_parents, + )?; + + Ok::<_, Error>(()) + }; + + for (k, seventid) in + self.db.open_tree("stateid_shorteventid")?.iter() + { + let sstatehash = ShortStateHash::new( + utils::u64_from_bytes(&k[0..size_of::()]) + .expect("number of bytes is correct"), + ); + let sstatekey = ShortStateKey::new( + utils::u64_from_bytes(&k[size_of::()..]) + .expect("number of bytes is correct"), + ); + if Some(sstatehash) != current_sstatehash { + if let Some(current_sstatehash) = current_sstatehash { + handle_state( + current_sstatehash, + current_room.as_deref().unwrap(), + current_state, + &mut last_roomstates, + )?; + last_roomstates.insert( + current_room.clone().unwrap(), + current_sstatehash, + ); + } + current_state = HashSet::new(); + current_sstatehash = Some(sstatehash); + + let event_id = self + .shorteventid_eventid + .get(&seventid) + .unwrap() + .unwrap(); + let string = + utils::string_from_bytes(&event_id).unwrap(); + let event_id = + <&EventId>::try_from(string.as_str()).unwrap(); + let pdu = services() + .rooms + .timeline + .get_pdu(event_id) + .unwrap() + .unwrap(); + + if Some(&pdu.room_id) != current_room.as_ref() { + current_room = Some(pdu.room_id.clone()); + } + } + + let seventid = ShortEventId::new( + utils::u64_from_bytes(&seventid) + .expect("number of bytes is correct"), + ); + current_state.insert(CompressedStateEvent { + state: sstatekey, + event: seventid, + }); + } + + if let Some(current_sstatehash) = current_sstatehash { + handle_state( + current_sstatehash, + current_room.as_deref().unwrap(), + current_state, + &mut 
last_roomstates, + )?; + } + Ok(()) + })?; + + migration(8, || { + // Generate short room ids for all rooms + for (room_id, _) in self.roomid_shortstatehash.iter() { + let shortroomid = + services().globals.next_count()?.to_be_bytes(); + self.roomid_shortroomid.insert(&room_id, &shortroomid)?; + info!("Migration: 8"); + } + // Update pduids db layout + let mut batch = self.pduid_pdu.iter().filter_map(|(key, v)| { + if !key.starts_with(b"!") { + return None; + } + let mut parts = key.splitn(2, |&b| b == 0xFF); + let room_id = parts.next().unwrap(); + let count = parts.next().unwrap(); + + let short_room_id = self + .roomid_shortroomid + .get(room_id) + .unwrap() + .expect("shortroomid should exist"); + + let mut new_key = short_room_id; + new_key.extend_from_slice(count); + + Some((new_key, v)) + }); + + self.pduid_pdu.insert_batch(&mut batch)?; + + let mut batch2 = + self.eventid_pduid.iter().filter_map(|(k, value)| { + if !value.starts_with(b"!") { + return None; + } + let mut parts = value.splitn(2, |&b| b == 0xFF); + let room_id = parts.next().unwrap(); + let count = parts.next().unwrap(); + + let short_room_id = self + .roomid_shortroomid + .get(room_id) + .unwrap() + .expect("shortroomid should exist"); + + let mut new_value = short_room_id; + new_value.extend_from_slice(count); + + Some((k, new_value)) + }); + + self.eventid_pduid.insert_batch(&mut batch2)?; + Ok(()) + })?; + + migration(9, || { + // Update tokenids db layout + let mut iter = self + .tokenids + .iter() + .filter_map(|(key, _)| { + if !key.starts_with(b"!") { + return None; + } + let mut parts = key.splitn(4, |&b| b == 0xFF); + let room_id = parts.next().unwrap(); + let word = parts.next().unwrap(); + let _pdu_id_room = parts.next().unwrap(); + let pdu_id_count = parts.next().unwrap(); + + let short_room_id = self + .roomid_shortroomid + .get(room_id) + .unwrap() + .expect("shortroomid should exist"); + let mut new_key = short_room_id; + new_key.extend_from_slice(word); + new_key.push(0xFF); + 
new_key.extend_from_slice(pdu_id_count); + Some((new_key, Vec::new())) + }) + .peekable(); + + while iter.peek().is_some() { + self.tokenids + .insert_batch(&mut iter.by_ref().take(1000))?; + debug!("Inserted smaller batch"); + } + + info!("Deleting starts"); + + let batch2: Vec<_> = self + .tokenids + .iter() + .filter_map(|(key, _)| key.starts_with(b"!").then_some(key)) + .collect(); + + for key in batch2 { + self.tokenids.remove(&key)?; + } + Ok(()) + })?; + + migration(10, || { + // Add other direction for shortstatekeys + for (statekey, shortstatekey) in + self.statekey_shortstatekey.iter() + { + self.shortstatekey_statekey + .insert(&shortstatekey, &statekey)?; + } + + // Force E2EE device list updates so we can send them over + // federation + for user_id in services().users.iter().filter_map(Result::ok) { + services().users.mark_device_key_update(&user_id)?; + } + Ok(()) + })?; + + migration(11, || { + self.db + .open_tree("userdevicesessionid_uiaarequest")? + .clear()?; + Ok(()) + })?; + + migration(12, || { + for username in services().users.list_local_users()? 
{ + let user = match UserId::parse_with_server_name( + username.clone(), + services().globals.server_name(), + ) { + Ok(u) => u, + Err(error) => { + warn!( + %error, + user_localpart = %username, + "Invalid username", + ); + continue; + } + }; + + let raw_rules_list = services() + .account_data + .get_global::(&user) + .unwrap() + .expect("Username is invalid"); + + let mut account_data = + raw_rules_list.deserialize().unwrap(); + let rules_list = &mut account_data.global; + + //content rule + { + let content_rule_transformation = [ + ".m.rules.contains_user_name", + ".m.rule.contains_user_name", + ]; + + let rule = rules_list + .content + .get(content_rule_transformation[0]); + if rule.is_some() { + let mut rule = rule.unwrap().clone(); + content_rule_transformation[1] + .clone_into(&mut rule.rule_id); + rules_list + .content + .shift_remove(content_rule_transformation[0]); + rules_list.content.insert(rule); + } + } + + //underride rules + { + let underride_rule_transformation = [ + [".m.rules.call", ".m.rule.call"], + [ + ".m.rules.room_one_to_one", + ".m.rule.room_one_to_one", + ], + [ + ".m.rules.encrypted_room_one_to_one", + ".m.rule.encrypted_room_one_to_one", + ], + [".m.rules.message", ".m.rule.message"], + [".m.rules.encrypted", ".m.rule.encrypted"], + ]; + + for transformation in underride_rule_transformation { + let rule = + rules_list.underride.get(transformation[0]); + if let Some(rule) = rule { + let mut rule = rule.clone(); + transformation[1].clone_into(&mut rule.rule_id); + rules_list + .underride + .shift_remove(transformation[0]); + rules_list.underride.insert(rule); + } + } + } + + services().account_data.update_global( + &user, + &Raw::new(&account_data) + .expect("json serialization should always succeed"), + )?; + } + Ok(()) + })?; + + // This migration can be reused as-is anytime the server-default + // rules are updated. + migration(13, || { + for username in services().users.list_local_users()? 
{ + let user = match UserId::parse_with_server_name( + username.clone(), + services().globals.server_name(), + ) { + Ok(u) => u, + Err(error) => { + warn!( + %error, + user_localpart = %username, + "Invalid username", + ); + continue; + } + }; + + let raw_rules_list = services() + .account_data + .get_global::(&user) + .unwrap() + .expect("Username is invalid"); + + let mut account_data = + raw_rules_list.deserialize().unwrap(); + + let user_default_rules = Ruleset::server_default(&user); + account_data + .global + .update_with_server_default(user_default_rules); + + services().account_data.update_global( + &user, + &Raw::new(&account_data) + .expect("json serialization should always succeed"), + )?; + } + Ok(()) + })?; + + assert_eq!( + services().globals.database_version().unwrap(), + latest_database_version, + "database should be migrated to the current version", + ); + + info!( + backend = %services().globals.config.database.backend, + version = latest_database_version, + "Loaded database", + ); + } else { + services() + .globals + .bump_database_version(latest_database_version)?; + + // Create the admin room and server user on first run + services().admin.create_admin_room().await?; + + info!( + backend = %services().globals.config.database.backend, + version = latest_database_version, + "Created new database", + ); + } + + Ok(()) + } + + #[tracing::instrument] + pub(crate) fn start_cleanup_task() { + use std::time::{Duration, Instant}; + + #[cfg(unix)] + use tokio::signal::unix::{signal, SignalKind}; + use tokio::time::interval; + + let timer_interval = Duration::from_secs(u64::from( + services().globals.config.cleanup_second_interval, + )); + + tokio::spawn(async move { + let mut i = interval(timer_interval); + #[cfg(unix)] + let mut s = signal(SignalKind::hangup()).unwrap(); + + loop { + #[cfg(unix)] + let msg = tokio::select! 
{ + _ = i.tick() => || { + debug!("cleanup: Timer ticked"); + }, + _ = s.recv() => || { + debug!("cleanup: Received SIGHUP"); + }, + }; + #[cfg(not(unix))] + let msg = { + i.tick().await; + || debug!("cleanup: Timer ticked") + }; + + async { + msg(); + let start = Instant::now(); + if let Err(error) = services().globals.cleanup() { + error!(%error, "cleanup: Error"); + } else { + debug!(elapsed = ?start.elapsed(), "cleanup: Finished"); + } + } + .instrument(info_span!("database_cleanup")) + .await; + } + }); + } +} + +/// If the current version is older than `new_version`, execute a migration +/// function. +fn migration(new_version: u64, migration: F) -> Result<(), Error> +where + F: FnOnce() -> Result<(), Error>, +{ + let current_version = services().globals.database_version()?; + if current_version < new_version { + migration()?; + services().globals.bump_database_version(new_version)?; + warn!("Migration: {current_version} -> {new_version} finished"); + } + Ok(()) +} diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index 0a321054..63714cd3 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -1,51 +1,35 @@ -use super::Config; -use crate::Result; - use std::{future::Future, pin::Pin, sync::Arc}; -#[cfg(feature = "sled")] -pub mod sled; +use crate::Result; #[cfg(feature = "sqlite")] -pub mod sqlite; - -#[cfg(feature = "heed")] -pub mod heed; +pub(crate) mod sqlite; #[cfg(feature = "rocksdb")] -pub mod rocksdb; +pub(crate) mod rocksdb; -#[cfg(feature = "persy")] -pub mod persy; +#[cfg(any(feature = "sqlite", feature = "rocksdb",))] +pub(crate) mod watchers; -#[cfg(any( - feature = "sqlite", - feature = "rocksdb", - feature = "heed", - feature = "persy" -))] -pub mod watchers; - -pub trait KeyValueDatabaseEngine: Send + Sync { - fn open(config: &Config) -> Result +pub(crate) trait KeyValueDatabaseEngine: Send + Sync { + #[cfg(any(feature = "sqlite", feature = "rocksdb"))] + fn open(config: &super::Config) -> Result 
where Self: Sized; fn open_tree(&self, name: &'static str) -> Result>; - fn flush(&self) -> Result<()>; fn cleanup(&self) -> Result<()> { Ok(()) } - fn memory_usage(&self) -> Result { - Ok("Current database engine does not support memory usage reporting.".to_owned()) - } - fn clear_caches(&self) {} } -pub trait KvTree: Send + Sync { +pub(crate) trait KvTree: Send + Sync { fn get(&self, key: &[u8]) -> Result>>; fn insert(&self, key: &[u8], value: &[u8]) -> Result<()>; - fn insert_batch(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()>; + fn insert_batch( + &self, + iter: &mut dyn Iterator, Vec)>, + ) -> Result<()>; fn remove(&self, key: &[u8]) -> Result<()>; @@ -58,14 +42,20 @@ pub trait KvTree: Send + Sync { ) -> Box, Vec)> + 'a>; fn increment(&self, key: &[u8]) -> Result>; - fn increment_batch(&self, iter: &mut dyn Iterator>) -> Result<()>; + fn increment_batch( + &self, + iter: &mut dyn Iterator>, + ) -> Result<()>; fn scan_prefix<'a>( &'a self, prefix: Vec, ) -> Box, Vec)> + 'a>; - fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>>; + fn watch_prefix<'a>( + &'a self, + prefix: &[u8], + ) -> Pin + Send + 'a>>; fn clear(&self) -> Result<()> { for (key, _) in self.iter() { diff --git a/src/database/abstraction/heed.rs b/src/database/abstraction/heed.rs deleted file mode 100644 index 9cca0975..00000000 --- a/src/database/abstraction/heed.rs +++ /dev/null @@ -1,194 +0,0 @@ -use super::{super::Config, watchers::Watchers}; -use crossbeam::channel::{bounded, Sender as ChannelSender}; -use threadpool::ThreadPool; - -use crate::{Error, Result}; -use std::{ - future::Future, - pin::Pin, - sync::{Arc, Mutex}, -}; - -use super::{DatabaseEngine, Tree}; - -type TupleOfBytes = (Vec, Vec); - -pub struct Engine { - env: heed::Env, - iter_pool: Mutex, -} - -pub struct EngineTree { - engine: Arc, - tree: Arc, - watchers: Watchers, -} - -fn convert_error(error: heed::Error) -> Error { - Error::HeedError { - error: error.to_string(), - } -} - -impl DatabaseEngine 
for Engine { - fn open(config: &Config) -> Result> { - let mut env_builder = heed::EnvOpenOptions::new(); - env_builder.map_size(1024 * 1024 * 1024 * 1024); // 1 Terabyte - env_builder.max_readers(126); - env_builder.max_dbs(128); - unsafe { - env_builder.flag(heed::flags::Flags::MdbWriteMap); - env_builder.flag(heed::flags::Flags::MdbMapAsync); - } - - Ok(Arc::new(Engine { - env: env_builder - .open(&config.database_path) - .map_err(convert_error)?, - iter_pool: Mutex::new(ThreadPool::new(10)), - })) - } - - fn open_tree(self: &Arc, name: &'static str) -> Result> { - // Creates the db if it doesn't exist already - Ok(Arc::new(EngineTree { - engine: Arc::clone(self), - tree: Arc::new( - self.env - .create_database(Some(name)) - .map_err(convert_error)?, - ), - watchers: Default::default(), - })) - } - - fn flush(self: &Arc) -> Result<()> { - self.env.force_sync().map_err(convert_error)?; - Ok(()) - } -} - -impl EngineTree { - fn iter_from_thread( - &self, - tree: Arc, - from: Vec, - backwards: bool, - ) -> Box + Send + Sync> { - let (s, r) = bounded::(100); - let engine = Arc::clone(&self.engine); - - let lock = self.engine.iter_pool.lock().await; - if lock.active_count() < lock.max_count() { - lock.execute(move || { - iter_from_thread_work(tree, &engine.env.read_txn().unwrap(), from, backwards, &s); - }); - } else { - std::thread::spawn(move || { - iter_from_thread_work(tree, &engine.env.read_txn().unwrap(), from, backwards, &s); - }); - } - - Box::new(r.into_iter()) - } -} - -fn iter_from_thread_work( - tree: Arc, - txn: &heed::RoTxn<'_>, - from: Vec, - backwards: bool, - s: &ChannelSender<(Vec, Vec)>, -) { - if backwards { - for (k, v) in tree.rev_range(txn, ..=&*from).unwrap().map(|r| r.unwrap()) { - if s.send((k.to_vec(), v.to_vec())).is_err() { - return; - } - } - } else { - if from.is_empty() { - for (k, v) in tree.iter(txn).unwrap().map(|r| r.unwrap()) { - if s.send((k.to_vec(), v.to_vec())).is_err() { - return; - } - } - } else { - for (k, v) in 
tree.range(txn, &*from..).unwrap().map(|r| r.unwrap()) { - if s.send((k.to_vec(), v.to_vec())).is_err() { - return; - } - } - } - } -} - -impl Tree for EngineTree { - fn get(&self, key: &[u8]) -> Result>> { - let txn = self.engine.env.read_txn().map_err(convert_error)?; - Ok(self - .tree - .get(&txn, &key) - .map_err(convert_error)? - .map(|s| s.to_vec())) - } - - fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { - let mut txn = self.engine.env.write_txn().map_err(convert_error)?; - self.tree - .put(&mut txn, &key, &value) - .map_err(convert_error)?; - txn.commit().map_err(convert_error)?; - self.watchers.wake(key); - Ok(()) - } - - fn remove(&self, key: &[u8]) -> Result<()> { - let mut txn = self.engine.env.write_txn().map_err(convert_error)?; - self.tree.delete(&mut txn, &key).map_err(convert_error)?; - txn.commit().map_err(convert_error)?; - Ok(()) - } - - fn iter<'a>(&'a self) -> Box, Vec)> + Send + 'a> { - self.iter_from(&[], false) - } - - fn iter_from( - &self, - from: &[u8], - backwards: bool, - ) -> Box, Vec)> + Send> { - self.iter_from_thread(Arc::clone(&self.tree), from.to_vec(), backwards) - } - - fn increment(&self, key: &[u8]) -> Result> { - let mut txn = self.engine.env.write_txn().map_err(convert_error)?; - - let old = self.tree.get(&txn, &key).map_err(convert_error)?; - let new = - crate::utils::increment(old.as_deref()).expect("utils::increment always returns Some"); - - self.tree - .put(&mut txn, &key, &&*new) - .map_err(convert_error)?; - - txn.commit().map_err(convert_error)?; - - Ok(new) - } - - fn scan_prefix<'a>( - &'a self, - prefix: Vec, - ) -> Box, Vec)> + Send + 'a> { - Box::new( - self.iter_from(&prefix, false) - .take_while(move |(key, _)| key.starts_with(&prefix)), - ) - } - - fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { - self.watchers.watch(prefix) - } -} diff --git a/src/database/abstraction/persy.rs b/src/database/abstraction/persy.rs deleted file mode 100644 index da7d4cf0..00000000 --- 
a/src/database/abstraction/persy.rs +++ /dev/null @@ -1,197 +0,0 @@ -use crate::{ - database::{ - abstraction::{watchers::Watchers, KeyValueDatabaseEngine, KvTree}, - Config, - }, - Result, -}; -use persy::{ByteVec, OpenOptions, Persy, Transaction, TransactionConfig, ValueMode}; - -use std::{future::Future, pin::Pin, sync::Arc}; - -use tracing::warn; - -pub struct Engine { - persy: Persy, -} - -impl KeyValueDatabaseEngine for Arc { - fn open(config: &Config) -> Result { - let mut cfg = persy::Config::new(); - cfg.change_cache_size((config.db_cache_capacity_mb * 1024.0 * 1024.0) as u64); - - let persy = OpenOptions::new() - .create(true) - .config(cfg) - .open(&format!("{}/db.persy", config.database_path))?; - Ok(Arc::new(Engine { persy })) - } - - fn open_tree(&self, name: &'static str) -> Result> { - // Create if it doesn't exist - if !self.persy.exists_index(name)? { - let mut tx = self.persy.begin()?; - tx.create_index::(name, ValueMode::Replace)?; - tx.prepare()?.commit()?; - } - - Ok(Arc::new(PersyTree { - persy: self.persy.clone(), - name: name.to_owned(), - watchers: Watchers::default(), - })) - } - - fn flush(&self) -> Result<()> { - Ok(()) - } -} - -pub struct PersyTree { - persy: Persy, - name: String, - watchers: Watchers, -} - -impl PersyTree { - fn begin(&self) -> Result { - Ok(self - .persy - .begin_with(TransactionConfig::new().set_background_sync(true))?) - } -} - -impl KvTree for PersyTree { - fn get(&self, key: &[u8]) -> Result>> { - let result = self - .persy - .get::(&self.name, &ByteVec::from(key))? 
- .next() - .map(|v| (*v).to_owned()); - Ok(result) - } - - fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { - self.insert_batch(&mut Some((key.to_owned(), value.to_owned())).into_iter())?; - self.watchers.wake(key); - Ok(()) - } - - fn insert_batch<'a>(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()> { - let mut tx = self.begin()?; - for (key, value) in iter { - tx.put::( - &self.name, - ByteVec::from(key.clone()), - ByteVec::from(value), - )?; - } - tx.prepare()?.commit()?; - Ok(()) - } - - fn increment_batch<'a>(&self, iter: &mut dyn Iterator>) -> Result<()> { - let mut tx = self.begin()?; - for key in iter { - let old = tx - .get::(&self.name, &ByteVec::from(key.clone()))? - .next() - .map(|v| (*v).to_owned()); - let new = crate::utils::increment(old.as_deref()).unwrap(); - tx.put::(&self.name, ByteVec::from(key), ByteVec::from(new))?; - } - tx.prepare()?.commit()?; - Ok(()) - } - - fn remove(&self, key: &[u8]) -> Result<()> { - let mut tx = self.begin()?; - tx.remove::(&self.name, ByteVec::from(key), None)?; - tx.prepare()?.commit()?; - Ok(()) - } - - fn iter<'a>(&'a self) -> Box, Vec)> + 'a> { - let iter = self.persy.range::(&self.name, ..); - match iter { - Ok(iter) => Box::new(iter.filter_map(|(k, v)| { - v.into_iter() - .map(|val| ((*k).to_owned(), (*val).to_owned())) - .next() - })), - Err(e) => { - warn!("error iterating {:?}", e); - Box::new(std::iter::empty()) - } - } - } - - fn iter_from<'a>( - &'a self, - from: &[u8], - backwards: bool, - ) -> Box, Vec)> + 'a> { - let range = if backwards { - self.persy - .range::(&self.name, ..=ByteVec::from(from)) - } else { - self.persy - .range::(&self.name, ByteVec::from(from)..) 
- }; - match range { - Ok(iter) => { - let map = iter.filter_map(|(k, v)| { - v.into_iter() - .map(|val| ((*k).to_owned(), (*val).to_owned())) - .next() - }); - if backwards { - Box::new(map.rev()) - } else { - Box::new(map) - } - } - Err(e) => { - warn!("error iterating with prefix {:?}", e); - Box::new(std::iter::empty()) - } - } - } - - fn increment(&self, key: &[u8]) -> Result> { - self.increment_batch(&mut Some(key.to_owned()).into_iter())?; - Ok(self.get(key)?.unwrap()) - } - - fn scan_prefix<'a>( - &'a self, - prefix: Vec, - ) -> Box, Vec)> + 'a> { - let range_prefix = ByteVec::from(prefix.clone()); - let range = self - .persy - .range::(&self.name, range_prefix..); - - match range { - Ok(iter) => { - let owned_prefix = prefix.clone(); - Box::new( - iter.take_while(move |(k, _)| (*k).starts_with(&owned_prefix)) - .filter_map(|(k, v)| { - v.into_iter() - .map(|val| ((*k).to_owned(), (*val).to_owned())) - .next() - }), - ) - } - Err(e) => { - warn!("error scanning prefix {:?}", e); - Box::new(std::iter::empty()) - } - } - } - - fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { - self.watchers.watch(prefix) - } -} diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index 447ee038..90cdb06a 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -1,27 +1,39 @@ -use super::{super::Config, watchers::Watchers, KeyValueDatabaseEngine, KvTree}; -use crate::{utils, Result}; use std::{ + collections::HashSet, future::Future, pin::Pin, - sync::{Arc, RwLock}, + sync::{Arc, Mutex, RwLock}, }; -pub struct Engine { - rocks: rocksdb::DBWithThreadMode, +use rocksdb::{ + BlockBasedOptions, BoundColumnFamily, Cache, ColumnFamilyDescriptor, + DBCompactionStyle, DBCompressionType, DBRecoveryMode, DBWithThreadMode, + Direction, IteratorMode, MultiThreaded, Options, ReadOptions, WriteOptions, +}; +use tracing::Level; + +use super::{ + super::Config, watchers::Watchers, KeyValueDatabaseEngine, 
KvTree, +}; +use crate::{utils, Result}; + +pub(crate) struct Engine { + rocks: DBWithThreadMode, max_open_files: i32, - cache: rocksdb::Cache, - old_cfs: Vec, + cache: Cache, + old_cfs: HashSet, + new_cfs: Mutex>, } -pub struct RocksDbEngineTree<'a> { +pub(crate) struct RocksDbEngineTree<'a> { db: Arc, name: &'a str, watchers: Watchers, write_lock: RwLock<()>, } -fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::Options { - let mut block_based_options = rocksdb::BlockBasedOptions::default(); +fn db_options(max_open_files: i32, rocksdb_cache: &Cache) -> Options { + let mut block_based_options = BlockBasedOptions::default(); block_based_options.set_block_cache(rocksdb_cache); block_based_options.set_bloom_filter(10.0, false); block_based_options.set_block_size(4 * 1024); @@ -29,19 +41,20 @@ fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::O block_based_options.set_pin_l0_filter_and_index_blocks_in_cache(true); block_based_options.set_optimize_filters_for_memory(true); - let mut db_opts = rocksdb::Options::default(); + let mut db_opts = Options::default(); db_opts.set_block_based_table_factory(&block_based_options); db_opts.create_if_missing(true); - db_opts.increase_parallelism(num_cpus::get() as i32); + db_opts + .increase_parallelism(num_cpus::get().try_into().unwrap_or(i32::MAX)); db_opts.set_max_open_files(max_open_files); - db_opts.set_compression_type(rocksdb::DBCompressionType::Lz4); - db_opts.set_bottommost_compression_type(rocksdb::DBCompressionType::Zstd); - db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); + db_opts.set_compression_type(DBCompressionType::Lz4); + db_opts.set_bottommost_compression_type(DBCompressionType::Zstd); + db_opts.set_compaction_style(DBCompactionStyle::Level); // https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning db_opts.set_level_compaction_dynamic_level_bytes(true); db_opts.set_max_background_jobs(6); - 
db_opts.set_bytes_per_sync(1048576); + db_opts.set_bytes_per_sync(1_048_576); // https://github.com/facebook/rocksdb/issues/849 db_opts.set_keep_log_file_num(100); @@ -51,49 +64,75 @@ fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::O // Unclean shutdowns of a Matrix homeserver are likely to be fine when // recovered in this manner as it's likely any lost information will be // restored via federation. - db_opts.set_wal_recovery_mode(rocksdb::DBRecoveryMode::TolerateCorruptedTailRecords); + db_opts.set_wal_recovery_mode(DBRecoveryMode::TolerateCorruptedTailRecords); db_opts } impl KeyValueDatabaseEngine for Arc { fn open(config: &Config) -> Result { - let cache_capacity_bytes = (config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize; - let rocksdb_cache = rocksdb::Cache::new_lru_cache(cache_capacity_bytes); + #[allow( + clippy::as_conversions, + clippy::cast_sign_loss, + clippy::cast_possible_truncation + )] + let cache_capacity_bytes = + (config.database.cache_capacity_mb * 1024.0 * 1024.0) as usize; + let rocksdb_cache = Cache::new_lru_cache(cache_capacity_bytes); - let db_opts = db_options(config.rocksdb_max_open_files, &rocksdb_cache); + let db_opts = + db_options(config.database.rocksdb_max_open_files, &rocksdb_cache); - let cfs = rocksdb::DBWithThreadMode::::list_cf( + let cfs = DBWithThreadMode::::list_cf( &db_opts, - &config.database_path, + &config.database.path, ) + .map(|x| x.into_iter().collect::>()) .unwrap_or_default(); - let db = rocksdb::DBWithThreadMode::::open_cf_descriptors( + let db = DBWithThreadMode::::open_cf_descriptors( &db_opts, - &config.database_path, + &config.database.path, cfs.iter().map(|name| { - rocksdb::ColumnFamilyDescriptor::new( + ColumnFamilyDescriptor::new( name, - db_options(config.rocksdb_max_open_files, &rocksdb_cache), + db_options( + config.database.rocksdb_max_open_files, + &rocksdb_cache, + ), ) }), )?; Ok(Arc::new(Engine { rocks: db, - max_open_files: config.rocksdb_max_open_files, + 
max_open_files: config.database.rocksdb_max_open_files, cache: rocksdb_cache, old_cfs: cfs, + new_cfs: Mutex::default(), })) } fn open_tree(&self, name: &'static str) -> Result> { - if !self.old_cfs.contains(&name.to_owned()) { + let mut new_cfs = + self.new_cfs.lock().expect("lock should not be poisoned"); + + let created_already = !new_cfs.insert(name); + + assert!( + // userroomid_highlightcount is special-cased because it is an + // existing violation of this check that happens to work anyway. We + // should write a database migration to obviate the need for this. + !(created_already && name != "userroomid_highlightcount"), + "detected attempt to alias column family: {name}", + ); + + // Remove `&& !created_already` when the above is addressed + if !self.old_cfs.contains(name) && !created_already { // Create if it didn't exist - let _ = self - .rocks - .create_cf(name, &db_options(self.max_open_files, &self.cache)); + self.rocks + .create_cf(name, &db_options(self.max_open_files, &self.cache)) + .expect("should be able to create column family"); } Ok(Arc::new(RocksDbEngineTree { @@ -103,52 +142,27 @@ impl KeyValueDatabaseEngine for Arc { write_lock: RwLock::new(()), })) } - - fn flush(&self) -> Result<()> { - // TODO? 
- Ok(()) - } - - fn memory_usage(&self) -> Result { - let stats = - rocksdb::perf::get_memory_usage_stats(Some(&[&self.rocks]), Some(&[&self.cache]))?; - Ok(format!( - "Approximate memory usage of all the mem-tables: {:.3} MB\n\ - Approximate memory usage of un-flushed mem-tables: {:.3} MB\n\ - Approximate memory usage of all the table readers: {:.3} MB\n\ - Approximate memory usage by cache: {:.3} MB\n\ - Approximate memory usage by cache pinned: {:.3} MB\n\ - ", - stats.mem_table_total as f64 / 1024.0 / 1024.0, - stats.mem_table_unflushed as f64 / 1024.0 / 1024.0, - stats.mem_table_readers_total as f64 / 1024.0 / 1024.0, - stats.cache_total as f64 / 1024.0 / 1024.0, - self.cache.get_pinned_usage() as f64 / 1024.0 / 1024.0, - )) - } - - fn clear_caches(&self) {} } impl RocksDbEngineTree<'_> { - fn cf(&self) -> Arc> { + fn cf(&self) -> Arc> { self.db.rocks.cf_handle(self.name).unwrap() } } impl KvTree for RocksDbEngineTree<'_> { + #[tracing::instrument(level = Level::TRACE, skip_all)] fn get(&self, key: &[u8]) -> Result>> { - let readoptions = rocksdb::ReadOptions::default(); + let readoptions = ReadOptions::default(); Ok(self.db.rocks.get_cf_opt(&self.cf(), key, &readoptions)?) 
} + #[tracing::instrument(level = Level::TRACE, skip_all)] fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { - let writeoptions = rocksdb::WriteOptions::default(); + let writeoptions = WriteOptions::default(); let lock = self.write_lock.read().unwrap(); - self.db - .rocks - .put_cf_opt(&self.cf(), key, value, &writeoptions)?; + self.db.rocks.put_cf_opt(&self.cf(), key, value, &writeoptions)?; drop(lock); self.watchers.wake(key); @@ -156,43 +170,45 @@ impl KvTree for RocksDbEngineTree<'_> { Ok(()) } - fn insert_batch<'a>(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()> { - let writeoptions = rocksdb::WriteOptions::default(); + #[tracing::instrument(level = Level::TRACE, skip_all)] + fn insert_batch( + &self, + iter: &mut dyn Iterator, Vec)>, + ) -> Result<()> { + let writeoptions = WriteOptions::default(); for (key, value) in iter { - self.db - .rocks - .put_cf_opt(&self.cf(), key, value, &writeoptions)?; + self.db.rocks.put_cf_opt(&self.cf(), key, value, &writeoptions)?; } Ok(()) } + #[tracing::instrument(level = Level::TRACE, skip_all)] fn remove(&self, key: &[u8]) -> Result<()> { - let writeoptions = rocksdb::WriteOptions::default(); - Ok(self - .db - .rocks - .delete_cf_opt(&self.cf(), key, &writeoptions)?) + let writeoptions = WriteOptions::default(); + Ok(self.db.rocks.delete_cf_opt(&self.cf(), key, &writeoptions)?) 
} + #[tracing::instrument(level = Level::TRACE, skip_all)] fn iter<'a>(&'a self) -> Box, Vec)> + 'a> { - let readoptions = rocksdb::ReadOptions::default(); + let readoptions = ReadOptions::default(); Box::new( self.db .rocks - .iterator_cf_opt(&self.cf(), readoptions, rocksdb::IteratorMode::Start) - .map(|r| r.unwrap()) + .iterator_cf_opt(&self.cf(), readoptions, IteratorMode::Start) + .map(Result::unwrap) .map(|(k, v)| (Vec::from(k), Vec::from(v))), ) } + #[tracing::instrument(level = Level::TRACE, skip_all)] fn iter_from<'a>( &'a self, from: &[u8], backwards: bool, ) -> Box, Vec)> + 'a> { - let readoptions = rocksdb::ReadOptions::default(); + let readoptions = ReadOptions::default(); Box::new( self.db @@ -200,48 +216,50 @@ impl KvTree for RocksDbEngineTree<'_> { .iterator_cf_opt( &self.cf(), readoptions, - rocksdb::IteratorMode::From( + IteratorMode::From( from, if backwards { - rocksdb::Direction::Reverse + Direction::Reverse } else { - rocksdb::Direction::Forward + Direction::Forward }, ), ) - .map(|r| r.unwrap()) + .map(Result::unwrap) .map(|(k, v)| (Vec::from(k), Vec::from(v))), ) } + #[tracing::instrument(level = Level::TRACE, skip_all)] fn increment(&self, key: &[u8]) -> Result> { - let readoptions = rocksdb::ReadOptions::default(); - let writeoptions = rocksdb::WriteOptions::default(); + let readoptions = ReadOptions::default(); + let writeoptions = WriteOptions::default(); let lock = self.write_lock.write().unwrap(); let old = self.db.rocks.get_cf_opt(&self.cf(), key, &readoptions)?; - let new = utils::increment(old.as_deref()).unwrap(); - self.db - .rocks - .put_cf_opt(&self.cf(), key, &new, &writeoptions)?; + let new = utils::increment(old.as_deref()); + self.db.rocks.put_cf_opt(&self.cf(), key, &new, &writeoptions)?; drop(lock); Ok(new) } - fn increment_batch<'a>(&self, iter: &mut dyn Iterator>) -> Result<()> { - let readoptions = rocksdb::ReadOptions::default(); - let writeoptions = rocksdb::WriteOptions::default(); + #[tracing::instrument(level = 
Level::TRACE, skip_all)] + fn increment_batch( + &self, + iter: &mut dyn Iterator>, + ) -> Result<()> { + let readoptions = ReadOptions::default(); + let writeoptions = WriteOptions::default(); let lock = self.write_lock.write().unwrap(); for key in iter { - let old = self.db.rocks.get_cf_opt(&self.cf(), &key, &readoptions)?; - let new = utils::increment(old.as_deref()).unwrap(); - self.db - .rocks - .put_cf_opt(&self.cf(), key, new, &writeoptions)?; + let old = + self.db.rocks.get_cf_opt(&self.cf(), &key, &readoptions)?; + let new = utils::increment(old.as_deref()); + self.db.rocks.put_cf_opt(&self.cf(), key, new, &writeoptions)?; } drop(lock); @@ -249,11 +267,12 @@ impl KvTree for RocksDbEngineTree<'_> { Ok(()) } + #[tracing::instrument(level = Level::TRACE, skip_all)] fn scan_prefix<'a>( &'a self, prefix: Vec, ) -> Box, Vec)> + 'a> { - let readoptions = rocksdb::ReadOptions::default(); + let readoptions = ReadOptions::default(); Box::new( self.db @@ -261,15 +280,19 @@ impl KvTree for RocksDbEngineTree<'_> { .iterator_cf_opt( &self.cf(), readoptions, - rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward), + IteratorMode::From(&prefix, Direction::Forward), ) - .map(|r| r.unwrap()) + .map(Result::unwrap) .map(|(k, v)| (Vec::from(k), Vec::from(v))) .take_while(move |(k, _)| k.starts_with(&prefix)), ) } - fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { + #[tracing::instrument(level = Level::TRACE, skip_all)] + fn watch_prefix<'a>( + &'a self, + prefix: &[u8], + ) -> Pin + Send + 'a>> { self.watchers.watch(prefix) } } diff --git a/src/database/abstraction/sled.rs b/src/database/abstraction/sled.rs deleted file mode 100644 index 87defc57..00000000 --- a/src/database/abstraction/sled.rs +++ /dev/null @@ -1,127 +0,0 @@ -use super::super::Config; -use crate::{utils, Result}; -use std::{future::Future, pin::Pin, sync::Arc}; -use tracing::warn; - -use super::{DatabaseEngine, Tree}; - -pub struct Engine(sled::Db); - -pub struct 
SledEngineTree(sled::Tree); - -impl DatabaseEngine for Engine { - fn open(config: &Config) -> Result> { - Ok(Arc::new(Engine( - sled::Config::default() - .path(&config.database_path) - .cache_capacity((config.db_cache_capacity_mb * 1024.0 * 1024.0) as u64) - .use_compression(true) - .open()?, - ))) - } - - fn open_tree(self: &Arc, name: &'static str) -> Result> { - Ok(Arc::new(SledEngineTree(self.0.open_tree(name)?))) - } - - fn flush(self: &Arc) -> Result<()> { - Ok(()) // noop - } -} - -impl Tree for SledEngineTree { - fn get(&self, key: &[u8]) -> Result>> { - Ok(self.0.get(key)?.map(|v| v.to_vec())) - } - - fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { - self.0.insert(key, value)?; - Ok(()) - } - - fn insert_batch<'a>(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()> { - for (key, value) in iter { - self.0.insert(key, value)?; - } - - Ok(()) - } - - fn remove(&self, key: &[u8]) -> Result<()> { - self.0.remove(key)?; - Ok(()) - } - - fn iter<'a>(&'a self) -> Box, Vec)> + 'a> { - Box::new( - self.0 - .iter() - .filter_map(|r| { - if let Err(e) = &r { - warn!("Error: {}", e); - } - r.ok() - }) - .map(|(k, v)| (k.to_vec().into(), v.to_vec().into())), - ) - } - - fn iter_from( - &self, - from: &[u8], - backwards: bool, - ) -> Box, Vec)>> { - let iter = if backwards { - self.0.range(..=from) - } else { - self.0.range(from..) - }; - - let iter = iter - .filter_map(|r| { - if let Err(e) = &r { - warn!("Error: {}", e); - } - r.ok() - }) - .map(|(k, v)| (k.to_vec().into(), v.to_vec().into())); - - if backwards { - Box::new(iter.rev()) - } else { - Box::new(iter) - } - } - - fn increment(&self, key: &[u8]) -> Result> { - Ok(self - .0 - .update_and_fetch(key, utils::increment) - .map(|o| o.expect("increment always sets a value").to_vec())?) 
- } - - fn scan_prefix<'a>( - &'a self, - prefix: Vec, - ) -> Box, Vec)> + 'a> { - let iter = self - .0 - .scan_prefix(prefix) - .filter_map(|r| { - if let Err(e) = &r { - warn!("Error: {}", e); - } - r.ok() - }) - .map(|(k, v)| (k.to_vec().into(), v.to_vec().into())); - - Box::new(iter) - } - - fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { - let prefix = prefix.to_vec(); - Box::pin(async move { - self.0.watch_prefix(prefix).await; - }) - } -} diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 222a8433..402e8621 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -1,7 +1,3 @@ -use super::{watchers::Watchers, KeyValueDatabaseEngine, KvTree}; -use crate::{database::Config, Result}; -use parking_lot::{Mutex, MutexGuard}; -use rusqlite::{Connection, DatabaseName::Main, OptionalExtension}; use std::{ cell::RefCell, future::Future, @@ -9,17 +5,25 @@ use std::{ pin::Pin, sync::Arc, }; + +use parking_lot::{Mutex, MutexGuard}; +use rusqlite::{Connection, DatabaseName::Main, OptionalExtension}; use thread_local::ThreadLocal; use tracing::debug; +use super::{watchers::Watchers, KeyValueDatabaseEngine, KvTree}; +use crate::{database::Config, Result}; + thread_local! 
{ - static READ_CONNECTION: RefCell> = RefCell::new(None); - static READ_CONNECTION_ITERATOR: RefCell> = RefCell::new(None); + static READ_CONNECTION: RefCell> = + const { RefCell::new(None) }; + static READ_CONNECTION_ITERATOR: RefCell> = + const { RefCell::new(None) }; } struct PreparedStatementIterator<'a> { - pub iterator: Box + 'a>, - pub _statement_ref: NonAliasingBox>, + pub(crate) iterator: Box + 'a>, + pub(crate) _statement_ref: AliasableBox>, } impl Iterator for PreparedStatementIterator<'_> { @@ -30,14 +34,29 @@ impl Iterator for PreparedStatementIterator<'_> { } } -struct NonAliasingBox(*mut T); -impl Drop for NonAliasingBox { +struct AliasableBox(*mut T); +impl Drop for AliasableBox { fn drop(&mut self) { - drop(unsafe { Box::from_raw(self.0) }); + // SAFETY: This is cursed and relies on non-local reasoning. + // + // In order for this to be safe: + // + // * All aliased references to this value must have been dropped first, + // for example by coming after its referrers in struct fields, because + // struct fields are automatically dropped in order from top to bottom + // in the absence of an explicit Drop impl. Otherwise, the referrers + // may read into deallocated memory. + // * This type must not be copyable or cloneable. Otherwise, double-free + // can occur. + // + // These conditions are met, but again, note that changing safe code in + // this module can result in unsoundness if any of these constraints are + // violated. 
+ unsafe { drop(Box::from_raw(self.0)) } } } -pub struct Engine { +pub(crate) struct Engine { writer: Mutex, read_conn_tls: ThreadLocal, read_iterator_conn_tls: ThreadLocal, @@ -53,7 +72,11 @@ impl Engine { conn.pragma_update(Some(Main), "page_size", 2048)?; conn.pragma_update(Some(Main), "journal_mode", "WAL")?; conn.pragma_update(Some(Main), "synchronous", "NORMAL")?; - conn.pragma_update(Some(Main), "cache_size", -i64::from(cache_size_kb))?; + conn.pragma_update( + Some(Main), + "cache_size", + -i64::from(cache_size_kb), + )?; conn.pragma_update(Some(Main), "wal_autocheckpoint", 0)?; Ok(conn) @@ -64,35 +87,55 @@ impl Engine { } fn read_lock(&self) -> &Connection { - self.read_conn_tls - .get_or(|| Self::prepare_conn(&self.path, self.cache_size_per_thread).unwrap()) + self.read_conn_tls.get_or(|| { + Self::prepare_conn(&self.path, self.cache_size_per_thread).unwrap() + }) } fn read_lock_iterator(&self) -> &Connection { - self.read_iterator_conn_tls - .get_or(|| Self::prepare_conn(&self.path, self.cache_size_per_thread).unwrap()) + self.read_iterator_conn_tls.get_or(|| { + Self::prepare_conn(&self.path, self.cache_size_per_thread).unwrap() + }) } - pub fn flush_wal(self: &Arc) -> Result<()> { - self.write_lock() - .pragma_update(Some(Main), "wal_checkpoint", "RESTART")?; + pub(crate) fn flush_wal(self: &Arc) -> Result<()> { + self.write_lock().pragma_update( + Some(Main), + "wal_checkpoint", + "RESTART", + )?; Ok(()) } } impl KeyValueDatabaseEngine for Arc { fn open(config: &Config) -> Result { - let path = Path::new(&config.database_path).join("conduit.db"); + let path = config.database.path.join(format!( + "{}.db", + if config.conduit_compat { + "conduit" + } else { + "grapevine" + } + )); // calculates cache-size per permanent connection // 1. convert MB to KiB - // 2. divide by permanent connections + permanent iter connections + write connection + // 2. divide by permanent connections + permanent iter connections + + // write connection // 3. 
round down to nearest integer - let cache_size_per_thread: u32 = ((config.db_cache_capacity_mb * 1024.0) - / ((num_cpus::get().max(1) * 2) + 1) as f64) - as u32; + #[allow( + clippy::as_conversions, + clippy::cast_possible_truncation, + clippy::cast_precision_loss, + clippy::cast_sign_loss + )] + let cache_size_per_thread = + ((config.database.cache_capacity_mb * 1024.0) + / ((num_cpus::get() as f64 * 2.0) + 1.0)) as u32; - let writer = Mutex::new(Engine::prepare_conn(&path, cache_size_per_thread)?); + let writer = + Mutex::new(Engine::prepare_conn(&path, cache_size_per_thread)?); let arc = Arc::new(Engine { writer, @@ -106,7 +149,13 @@ impl KeyValueDatabaseEngine for Arc { } fn open_tree(&self, name: &str) -> Result> { - self.write_lock().execute(&format!("CREATE TABLE IF NOT EXISTS {name} ( \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL )"), [])?; + self.write_lock().execute( + &format!( + "CREATE TABLE IF NOT EXISTS {name} ( \"key\" BLOB PRIMARY \ + KEY, \"value\" BLOB NOT NULL )" + ), + [], + )?; Ok(Arc::new(SqliteTable { engine: Arc::clone(self), @@ -115,17 +164,12 @@ impl KeyValueDatabaseEngine for Arc { })) } - fn flush(&self) -> Result<()> { - // we enabled PRAGMA synchronous=normal, so this should not be necessary - Ok(()) - } - fn cleanup(&self) -> Result<()> { self.flush_wal() } } -pub struct SqliteTable { +pub(crate) struct SqliteTable { engine: Arc, name: String, watchers: Watchers, @@ -134,14 +178,26 @@ pub struct SqliteTable { type TupleOfBytes = (Vec, Vec); impl SqliteTable { - fn get_with_guard(&self, guard: &Connection, key: &[u8]) -> Result>> { + fn get_with_guard( + &self, + guard: &Connection, + key: &[u8], + ) -> Result>> { Ok(guard - .prepare(format!("SELECT value FROM {} WHERE key = ?", self.name).as_str())? + .prepare( + format!("SELECT value FROM {} WHERE key = ?", self.name) + .as_str(), + )? .query_row([key], |row| row.get(0)) .optional()?) 
} - fn insert_with_guard(&self, guard: &Connection, key: &[u8], value: &[u8]) -> Result<()> { + fn insert_with_guard( + &self, + guard: &Connection, + key: &[u8], + value: &[u8], + ) -> Result<()> { guard.execute( format!( "INSERT OR REPLACE INTO {} (key, value) VALUES (?, ?)", @@ -153,7 +209,7 @@ impl SqliteTable { Ok(()) } - pub fn iter_with_guard<'a>( + pub(crate) fn iter_with_guard<'a>( &'a self, guard: &'a Connection, ) -> Box + 'a> { @@ -166,15 +222,13 @@ impl SqliteTable { .unwrap(), )); - let statement_ref = NonAliasingBox(statement); - - //let name = self.name.clone(); + let statement_ref = AliasableBox(statement); let iterator = Box::new( statement .query_map([], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) .unwrap() - .map(move |r| r.unwrap()), + .map(Result::unwrap), ); Box::new(PreparedStatementIterator { @@ -197,7 +251,10 @@ impl KvTree for SqliteTable { Ok(()) } - fn insert_batch<'a>(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()> { + fn insert_batch( + &self, + iter: &mut dyn Iterator, Vec)>, + ) -> Result<()> { let guard = self.engine.write_lock(); guard.execute("BEGIN", [])?; @@ -211,14 +268,16 @@ impl KvTree for SqliteTable { Ok(()) } - fn increment_batch<'a>(&self, iter: &mut dyn Iterator>) -> Result<()> { + fn increment_batch( + &self, + iter: &mut dyn Iterator>, + ) -> Result<()> { let guard = self.engine.write_lock(); guard.execute("BEGIN", [])?; for key in iter { let old = self.get_with_guard(&guard, &key)?; - let new = crate::utils::increment(old.as_deref()) - .expect("utils::increment always returns Some"); + let new = crate::utils::increment(old.as_deref()); self.insert_with_guard(&guard, &key, &new)?; } guard.execute("COMMIT", [])?; @@ -251,27 +310,29 @@ impl KvTree for SqliteTable { backwards: bool, ) -> Box + 'a> { let guard = self.engine.read_lock_iterator(); - let from = from.to_vec(); // TODO change interface? - - //let name = self.name.clone(); + // TODO change interface? 
+ let from = from.to_vec(); if backwards { let statement = Box::leak(Box::new( guard .prepare(&format!( - "SELECT key, value FROM {} WHERE key <= ? ORDER BY key DESC", + "SELECT key, value FROM {} WHERE key <= ? ORDER BY \ + key DESC", &self.name )) .unwrap(), )); - let statement_ref = NonAliasingBox(statement); + let statement_ref = AliasableBox(statement); let iterator = Box::new( statement - .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) + .query_map([from], |row| { + Ok((row.get_unwrap(0), row.get_unwrap(1))) + }) .unwrap() - .map(move |r| r.unwrap()), + .map(Result::unwrap), ); Box::new(PreparedStatementIterator { iterator, @@ -281,19 +342,22 @@ impl KvTree for SqliteTable { let statement = Box::leak(Box::new( guard .prepare(&format!( - "SELECT key, value FROM {} WHERE key >= ? ORDER BY key ASC", + "SELECT key, value FROM {} WHERE key >= ? ORDER BY \ + key ASC", &self.name )) .unwrap(), )); - let statement_ref = NonAliasingBox(statement); + let statement_ref = AliasableBox(statement); let iterator = Box::new( statement - .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) + .query_map([from], |row| { + Ok((row.get_unwrap(0), row.get_unwrap(1))) + }) .unwrap() - .map(move |r| r.unwrap()), + .map(Result::unwrap), ); Box::new(PreparedStatementIterator { @@ -308,22 +372,27 @@ impl KvTree for SqliteTable { let old = self.get_with_guard(&guard, key)?; - let new = - crate::utils::increment(old.as_deref()).expect("utils::increment always returns Some"); + let new = crate::utils::increment(old.as_deref()); self.insert_with_guard(&guard, key, &new)?; Ok(new) } - fn scan_prefix<'a>(&'a self, prefix: Vec) -> Box + 'a> { + fn scan_prefix<'a>( + &'a self, + prefix: Vec, + ) -> Box + 'a> { Box::new( self.iter_from(&prefix, false) .take_while(move |(key, _)| key.starts_with(&prefix)), ) } - fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { + fn watch_prefix<'a>( + &'a self, + prefix: &[u8], + ) -> Pin + Send + 'a>> { 
self.watchers.watch(prefix) } diff --git a/src/database/abstraction/watchers.rs b/src/database/abstraction/watchers.rs index 01156abd..3f33e537 100644 --- a/src/database/abstraction/watchers.rs +++ b/src/database/abstraction/watchers.rs @@ -4,12 +4,14 @@ use std::{ pin::Pin, sync::RwLock, }; + use tokio::sync::watch; #[derive(Default)] pub(super) struct Watchers { #[allow(clippy::type_complexity)] - watchers: RwLock, (watch::Sender<()>, watch::Receiver<()>)>>, + watchers: + RwLock, (watch::Sender<()>, watch::Receiver<()>)>>, } impl Watchers { @@ -17,10 +19,11 @@ impl Watchers { &'a self, prefix: &[u8], ) -> Pin + Send + 'a>> { - let mut rx = match self.watchers.write().unwrap().entry(prefix.to_vec()) { + let mut rx = match self.watchers.write().unwrap().entry(prefix.to_vec()) + { hash_map::Entry::Occupied(o) => o.get().1.clone(), hash_map::Entry::Vacant(v) => { - let (tx, rx) = tokio::sync::watch::channel(()); + let (tx, rx) = watch::channel(()); v.insert((tx, rx.clone())); rx } @@ -31,6 +34,7 @@ impl Watchers { rx.changed().await.unwrap(); }) } + pub(super) fn wake(&self, key: &[u8]) { let watchers = self.watchers.read().unwrap(); let mut triggered = Vec::new(); @@ -47,9 +51,9 @@ impl Watchers { let mut watchers = self.watchers.write().unwrap(); for prefix in triggered { if let Some(tx) = watchers.remove(prefix) { - let _ = tx.0.send(()); + tx.0.send(()).expect("channel should still be open"); } } - }; + } } } diff --git a/src/database/key_value/mod.rs b/src/database/key_value.rs similarity index 86% rename from src/database/key_value/mod.rs rename to src/database/key_value.rs index c4496af8..098f3391 100644 --- a/src/database/key_value/mod.rs +++ b/src/database/key_value.rs @@ -1,10 +1,8 @@ mod account_data; -//mod admin; mod appservice; mod globals; mod key_backups; mod media; -//mod pdu; mod pusher; mod rooms; mod sending; diff --git a/src/database/key_value/account_data.rs b/src/database/key_value/account_data.rs index 970b36b5..49231a2b 100644 --- 
a/src/database/key_value/account_data.rs +++ b/src/database/key_value/account_data.rs @@ -1,57 +1,62 @@ use std::collections::HashMap; -use ruma::{ - api::client::error::ErrorKind, - events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, - serde::Raw, - RoomId, UserId, +use ruma::{api::client::error::ErrorKind, RoomId, UserId}; +use serde::Deserialize; +use serde_json::value::RawValue; + +use crate::{ + database::KeyValueDatabase, service, services, utils, Error, Result, }; -use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; - impl service::account_data::Data for KeyValueDatabase { - /// Places one event in the account data of the user and removes the previous entry. - #[tracing::instrument(skip(self, room_id, user_id, event_type, data))] fn update( &self, room_id: Option<&RoomId>, user_id: &UserId, - event_type: RoomAccountDataEventType, - data: &serde_json::Value, + event_type: &str, + data: &RawValue, ) -> Result<()> { + // Allowed because we just use this type to validate the schema, and + // don't read the fields. 
+ #[allow(dead_code)] + #[derive(Deserialize)] + struct ExtractEventFields<'a> { + #[serde(rename = "type")] + event_type: &'a str, + content: &'a RawValue, + } + let mut prefix = room_id - .map(|r| r.to_string()) + .map(ToString::to_string) .unwrap_or_default() .as_bytes() .to_vec(); - prefix.push(0xff); + prefix.push(0xFF); prefix.extend_from_slice(user_id.as_bytes()); - prefix.push(0xff); + prefix.push(0xFF); let mut roomuserdataid = prefix.clone(); - roomuserdataid.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); - roomuserdataid.push(0xff); - roomuserdataid.extend_from_slice(event_type.to_string().as_bytes()); + roomuserdataid + .extend_from_slice(&services().globals.next_count()?.to_be_bytes()); + roomuserdataid.push(0xFF); + roomuserdataid.extend_from_slice(event_type.as_bytes()); let mut key = prefix; - key.extend_from_slice(event_type.to_string().as_bytes()); + key.extend_from_slice(event_type.as_bytes()); - if data.get("type").is_none() || data.get("content").is_none() { + if serde_json::from_str::>(data.get()).is_err() { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Account data doesn't have all required fields.", )); } - self.roomuserdataid_accountdata.insert( - &roomuserdataid, - &serde_json::to_vec(&data).expect("to_vec always works on json values"), - )?; + self.roomuserdataid_accountdata + .insert(&roomuserdataid, data.get().as_bytes())?; let prev = self.roomusertype_roomuserdataid.get(&key)?; - self.roomusertype_roomuserdataid - .insert(&key, &roomuserdataid)?; + self.roomusertype_roomuserdataid.insert(&key, &roomuserdataid)?; // Remove old entry if let Some(prev) = prev { @@ -61,30 +66,26 @@ impl service::account_data::Data for KeyValueDatabase { Ok(()) } - /// Searches the account data for a specific kind. 
- #[tracing::instrument(skip(self, room_id, user_id, kind))] fn get( &self, room_id: Option<&RoomId>, user_id: &UserId, - kind: RoomAccountDataEventType, - ) -> Result>> { + kind: &str, + ) -> Result>> { let mut key = room_id - .map(|r| r.to_string()) + .map(ToString::to_string) .unwrap_or_default() .as_bytes() .to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(user_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(kind.to_string().as_bytes()); + key.push(0xFF); + key.extend_from_slice(kind.as_bytes()); self.roomusertype_roomuserdataid .get(&key)? .and_then(|roomuserdataid| { - self.roomuserdataid_accountdata - .get(&roomuserdataid) - .transpose() + self.roomuserdataid_accountdata.get(&roomuserdataid).transpose() }) .transpose()? .map(|data| { @@ -94,24 +95,22 @@ impl service::account_data::Data for KeyValueDatabase { .transpose() } - /// Returns all changes to the account data that happened after `since`. - #[tracing::instrument(skip(self, room_id, user_id, since))] fn changes_since( &self, room_id: Option<&RoomId>, user_id: &UserId, since: u64, - ) -> Result>> { + ) -> Result>> { let mut userdata = HashMap::new(); let mut prefix = room_id - .map(|r| r.to_string()) + .map(ToString::to_string) .unwrap_or_default() .as_bytes() .to_vec(); - prefix.push(0xff); + prefix.push(0xFF); prefix.extend_from_slice(user_id.as_bytes()); - prefix.push(0xff); + prefix.push(0xFF); // Skip the data that's exactly at since, because we sent that last time let mut first_possible = prefix.clone(); @@ -123,15 +122,23 @@ impl service::account_data::Data for KeyValueDatabase { .take_while(move |(k, _)| k.starts_with(&prefix)) .map(|(k, v)| { Ok::<_, Error>(( - RoomAccountDataEventType::from( - utils::string_from_bytes(k.rsplit(|&b| b == 0xff).next().ok_or_else( - || Error::bad_database("RoomUserData ID in db is invalid."), - )?) 
- .map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?, - ), - serde_json::from_slice::>(&v).map_err(|_| { - Error::bad_database("Database contains invalid account data.") + utils::string_from_bytes( + k.rsplit(|&b| b == 0xFF).next().ok_or_else(|| { + Error::bad_database( + "RoomUserData ID in db is invalid.", + ) + })?, + ) + .map_err(|_| { + Error::bad_database("RoomUserData ID in db is invalid.") })?, + serde_json::from_slice::>(&v).map_err( + |_| { + Error::bad_database( + "Database contains invalid account data.", + ) + }, + )?, )) }) { diff --git a/src/database/key_value/appservice.rs b/src/database/key_value/appservice.rs index b547e66a..b1ef80c0 100644 --- a/src/database/key_value/appservice.rs +++ b/src/database/key_value/appservice.rs @@ -20,8 +20,7 @@ impl service::appservice::Data for KeyValueDatabase { /// /// * `service_name` - the name you send to register the service previously fn unregister_appservice(&self, service_name: &str) -> Result<()> { - self.id_appserviceregistrations - .remove(service_name.as_bytes())?; + self.id_appserviceregistrations.remove(service_name.as_bytes())?; Ok(()) } @@ -30,25 +29,30 @@ impl service::appservice::Data for KeyValueDatabase { .get(id.as_bytes())? 
.map(|bytes| { serde_yaml::from_slice(&bytes).map_err(|_| { - Error::bad_database("Invalid registration bytes in id_appserviceregistrations.") + Error::bad_database( + "Invalid registration bytes in \ + id_appserviceregistrations.", + ) }) }) .transpose() } - fn iter_ids<'a>(&'a self) -> Result> + 'a>> { - Ok(Box::new(self.id_appserviceregistrations.iter().map( - |(id, _)| { - utils::string_from_bytes(&id).map_err(|_| { - Error::bad_database("Invalid id bytes in id_appserviceregistrations.") - }) - }, - ))) + fn iter_ids<'a>( + &'a self, + ) -> Result> + 'a>> { + Ok(Box::new(self.id_appserviceregistrations.iter().map(|(id, _)| { + utils::string_from_bytes(&id).map_err(|_| { + Error::bad_database( + "Invalid id bytes in id_appserviceregistrations.", + ) + }) + }))) } fn all(&self) -> Result> { self.iter_ids()? - .filter_map(|id| id.ok()) + .filter_map(Result::ok) .map(move |id| { Ok(( id.clone(), diff --git a/src/database/key_value/globals.rs b/src/database/key_value/globals.rs index 2851ce53..e3293571 100644 --- a/src/database/key_value/globals.rs +++ b/src/database/key_value/globals.rs @@ -1,18 +1,18 @@ -use std::collections::{BTreeMap, HashMap}; - use async_trait::async_trait; use futures_util::{stream::FuturesUnordered, StreamExt}; -use lru_cache::LruCache; use ruma::{ - api::federation::discovery::{ServerSigningKeys, VerifyKey}, + api::federation::discovery::{OldVerifyKey, ServerSigningKeys}, signatures::Ed25519KeyPair, - DeviceId, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, ServerName, UserId, + DeviceId, ServerName, UserId, }; -use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; +use crate::{ + database::KeyValueDatabase, + service::{self, globals::SigningKeys}, + services, utils, Error, Result, +}; -pub const COUNTER: &[u8] = b"c"; -pub const LAST_CHECK_FOR_UPDATES_COUNT: &[u8] = b"u"; +pub(crate) const COUNTER: &[u8] = b"c"; #[async_trait] impl service::globals::Data for KeyValueDatabase { @@ -28,35 +28,23 @@ impl 
service::globals::Data for KeyValueDatabase { }) } - fn last_check_for_updates_id(&self) -> Result { - self.global - .get(LAST_CHECK_FOR_UPDATES_COUNT)? - .map_or(Ok(0_u64), |bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("last check for updates count has invalid bytes.") - }) - }) - } - - fn update_check_for_updates_id(&self, id: u64) -> Result<()> { - self.global - .insert(LAST_CHECK_FOR_UPDATES_COUNT, &id.to_be_bytes())?; - - Ok(()) - } - - async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { + #[tracing::instrument(skip(self))] + async fn watch( + &self, + user_id: &UserId, + device_id: &DeviceId, + ) -> Result<()> { let userid_bytes = user_id.as_bytes().to_vec(); let mut userid_prefix = userid_bytes.clone(); - userid_prefix.push(0xff); + userid_prefix.push(0xFF); let mut userdeviceid_prefix = userid_prefix.clone(); userdeviceid_prefix.extend_from_slice(device_id.as_bytes()); - userdeviceid_prefix.push(0xff); + userdeviceid_prefix.push(0xFF); let mut futures = FuturesUnordered::new(); - // Return when *any* user changed his key + // Return when *any* user changed their key // TODO: only send for user they share a room with futures.push(self.todeviceid_events.watch_prefix(&userdeviceid_prefix)); @@ -64,17 +52,17 @@ impl service::globals::Data for KeyValueDatabase { futures.push(self.userroomid_invitestate.watch_prefix(&userid_prefix)); futures.push(self.userroomid_leftstate.watch_prefix(&userid_prefix)); futures.push( - self.userroomid_notificationcount - .watch_prefix(&userid_prefix), + self.userroomid_notificationcount.watch_prefix(&userid_prefix), ); - futures.push(self.userroomid_highlightcount.watch_prefix(&userid_prefix)); + futures + .push(self.userroomid_highlightcount.watch_prefix(&userid_prefix)); // Events for rooms we are in for room_id in services() .rooms .state_cache .rooms_joined(user_id) - .filter_map(|r| r.ok()) + .filter_map(Result::ok) { let short_roomid = services() .rooms @@ -83,22 
+71,30 @@ impl service::globals::Data for KeyValueDatabase { .ok() .flatten() .expect("room exists") + .get() .to_be_bytes() .to_vec(); let roomid_bytes = room_id.as_bytes().to_vec(); let mut roomid_prefix = roomid_bytes.clone(); - roomid_prefix.push(0xff); + roomid_prefix.push(0xFF); // PDUs futures.push(self.pduid_pdu.watch_prefix(&short_roomid)); // EDUs - futures.push(Box::into_pin(Box::new(async move { - let _result = services().rooms.edus.typing.wait_for_update(&room_id).await; - }))); + futures.push(Box::pin(async move { + let _result = services() + .rooms + .edus + .typing + .wait_for_update(&room_id) + .await; + })); - futures.push(self.readreceiptid_readreceipt.watch_prefix(&roomid_prefix)); + futures.push( + self.readreceiptid_readreceipt.watch_prefix(&roomid_prefix), + ); // Key changes futures.push(self.keychangeid_userid.watch_prefix(&roomid_prefix)); @@ -108,12 +104,11 @@ impl service::globals::Data for KeyValueDatabase { roomuser_prefix.extend_from_slice(&userid_prefix); futures.push( - self.roomusertype_roomuserdataid - .watch_prefix(&roomuser_prefix), + self.roomusertype_roomuserdataid.watch_prefix(&roomuser_prefix), ); } - let mut globaluserdata_prefix = vec![0xff]; + let mut globaluserdata_prefix = vec![0xFF]; globaluserdata_prefix.extend_from_slice(&userid_prefix); futures.push( @@ -125,7 +120,8 @@ impl service::globals::Data for KeyValueDatabase { futures.push(self.keychangeid_userid.watch_prefix(&userid_prefix)); // One time keys - futures.push(self.userid_lastonetimekeyupdate.watch_prefix(&userid_bytes)); + futures + .push(self.userid_lastonetimekeyupdate.watch_prefix(&userid_bytes)); futures.push(Box::pin(services().globals.rotate.watch())); @@ -136,70 +132,7 @@ impl service::globals::Data for KeyValueDatabase { } fn cleanup(&self) -> Result<()> { - self._db.cleanup() - } - - fn memory_usage(&self) -> String { - let pdu_cache = self.pdu_cache.lock().unwrap().len(); - let shorteventid_cache = self.shorteventid_cache.lock().unwrap().len(); - 
let auth_chain_cache = self.auth_chain_cache.lock().unwrap().len(); - let eventidshort_cache = self.eventidshort_cache.lock().unwrap().len(); - let statekeyshort_cache = self.statekeyshort_cache.lock().unwrap().len(); - let our_real_users_cache = self.our_real_users_cache.read().unwrap().len(); - let appservice_in_room_cache = self.appservice_in_room_cache.read().unwrap().len(); - let lasttimelinecount_cache = self.lasttimelinecount_cache.lock().unwrap().len(); - - let mut response = format!( - "\ -pdu_cache: {pdu_cache} -shorteventid_cache: {shorteventid_cache} -auth_chain_cache: {auth_chain_cache} -eventidshort_cache: {eventidshort_cache} -statekeyshort_cache: {statekeyshort_cache} -our_real_users_cache: {our_real_users_cache} -appservice_in_room_cache: {appservice_in_room_cache} -lasttimelinecount_cache: {lasttimelinecount_cache}\n" - ); - if let Ok(db_stats) = self._db.memory_usage() { - response += &db_stats; - } - - response - } - - fn clear_caches(&self, amount: u32) { - if amount > 0 { - let c = &mut *self.pdu_cache.lock().unwrap(); - *c = LruCache::new(c.capacity()); - } - if amount > 1 { - let c = &mut *self.shorteventid_cache.lock().unwrap(); - *c = LruCache::new(c.capacity()); - } - if amount > 2 { - let c = &mut *self.auth_chain_cache.lock().unwrap(); - *c = LruCache::new(c.capacity()); - } - if amount > 3 { - let c = &mut *self.eventidshort_cache.lock().unwrap(); - *c = LruCache::new(c.capacity()); - } - if amount > 4 { - let c = &mut *self.statekeyshort_cache.lock().unwrap(); - *c = LruCache::new(c.capacity()); - } - if amount > 5 { - let c = &mut *self.our_real_users_cache.write().unwrap(); - *c = HashMap::new(); - } - if amount > 6 { - let c = &mut *self.appservice_in_room_cache.write().unwrap(); - *c = HashMap::new(); - } - if amount > 7 { - let c = &mut *self.lasttimelinecount_cache.lock().unwrap(); - *c = HashMap::new(); - } + self.db.cleanup() } fn load_keypair(&self) -> Result { @@ -209,100 +142,148 @@ lasttimelinecount_cache: 
{lasttimelinecount_cache}\n" self.global.insert(b"keypair", &keypair)?; Ok::<_, Error>(keypair) }, - |s| Ok(s.to_vec()), + |s| Ok(s.clone()), )?; - let mut parts = keypair_bytes.splitn(2, |&b| b == 0xff); + let mut parts = keypair_bytes.splitn(2, |&b| b == 0xFF); utils::string_from_bytes( // 1. version - parts - .next() - .expect("splitn always returns at least one element"), + parts.next().expect("splitn always returns at least one element"), ) .map_err(|_| Error::bad_database("Invalid version bytes in keypair.")) .and_then(|version| { // 2. key parts .next() - .ok_or_else(|| Error::bad_database("Invalid keypair format in database.")) + .ok_or_else(|| { + Error::bad_database("Invalid keypair format in database.") + }) .map(|key| (version, key)) }) .and_then(|(version, key)| { - Ed25519KeyPair::from_der(key, version) - .map_err(|_| Error::bad_database("Private or public keys are invalid.")) + Ed25519KeyPair::from_der(key, version).map_err(|_| { + Error::bad_database("Private or public keys are invalid.") + }) }) } + fn remove_keypair(&self) -> Result<()> { self.global.remove(b"keypair") } - fn add_signing_key( + fn add_signing_key_from_trusted_server( &self, origin: &ServerName, new_keys: ServerSigningKeys, - ) -> Result> { - // Not atomic, but this is not critical - let signingkeys = self.server_signingkeys.get(origin.as_bytes())?; + ) -> Result { + let prev_keys = self.server_signingkeys.get(origin.as_bytes())?; - let mut keys = signingkeys - .and_then(|keys| serde_json::from_slice(&keys).ok()) - .unwrap_or_else(|| { - // Just insert "now", it doesn't matter - ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now()) - }); + Ok( + if let Some(mut prev_keys) = prev_keys.and_then(|keys| { + serde_json::from_slice::(&keys).ok() + }) { + let ServerSigningKeys { + verify_keys, + old_verify_keys, + .. + } = new_keys; - let ServerSigningKeys { - verify_keys, - old_verify_keys, - .. 
- } = new_keys; + prev_keys.verify_keys.extend(verify_keys); + prev_keys.old_verify_keys.extend(old_verify_keys); + prev_keys.valid_until_ts = new_keys.valid_until_ts; - keys.verify_keys.extend(verify_keys); - keys.old_verify_keys.extend(old_verify_keys); + self.server_signingkeys.insert( + origin.as_bytes(), + &serde_json::to_vec(&prev_keys) + .expect("serversigningkeys can be serialized"), + )?; - self.server_signingkeys.insert( - origin.as_bytes(), - &serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"), - )?; + prev_keys.into() + } else { + self.server_signingkeys.insert( + origin.as_bytes(), + &serde_json::to_vec(&new_keys) + .expect("serversigningkeys can be serialized"), + )?; - let mut tree = keys.verify_keys; - tree.extend( - keys.old_verify_keys - .into_iter() - .map(|old| (old.0, VerifyKey::new(old.1.key))), - ); - - Ok(tree) + new_keys.into() + }, + ) } - /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. + fn add_signing_key_from_origin( + &self, + origin: &ServerName, + new_keys: ServerSigningKeys, + ) -> Result { + let prev_keys = self.server_signingkeys.get(origin.as_bytes())?; + + Ok( + if let Some(mut prev_keys) = prev_keys.and_then(|keys| { + serde_json::from_slice::(&keys).ok() + }) { + let ServerSigningKeys { + verify_keys, + old_verify_keys, + .. 
+ } = new_keys; + + // Moving `verify_keys` no longer present to `old_verify_keys` + for (key_id, key) in prev_keys.verify_keys { + if !verify_keys.contains_key(&key_id) { + prev_keys.old_verify_keys.insert( + key_id, + OldVerifyKey::new( + prev_keys.valid_until_ts, + key.key, + ), + ); + } + } + + prev_keys.verify_keys = verify_keys; + prev_keys.old_verify_keys.extend(old_verify_keys); + prev_keys.valid_until_ts = new_keys.valid_until_ts; + + self.server_signingkeys.insert( + origin.as_bytes(), + &serde_json::to_vec(&prev_keys) + .expect("serversigningkeys can be serialized"), + )?; + + prev_keys.into() + } else { + self.server_signingkeys.insert( + origin.as_bytes(), + &serde_json::to_vec(&new_keys) + .expect("serversigningkeys can be serialized"), + )?; + + new_keys.into() + }, + ) + } + + /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found + /// for the server. fn signing_keys_for( &self, origin: &ServerName, - ) -> Result> { - let signingkeys = self - .server_signingkeys - .get(origin.as_bytes())? 
- .and_then(|bytes| serde_json::from_slice(&bytes).ok()) - .map(|keys: ServerSigningKeys| { - let mut tree = keys.verify_keys; - tree.extend( - keys.old_verify_keys - .into_iter() - .map(|old| (old.0, VerifyKey::new(old.1.key))), - ); - tree - }) - .unwrap_or_else(BTreeMap::new); + ) -> Result> { + let signingkeys = + self.server_signingkeys.get(origin.as_bytes())?.and_then(|bytes| { + serde_json::from_slice::(&bytes).ok() + }); Ok(signingkeys) } fn database_version(&self) -> Result { self.global.get(b"version")?.map_or(Ok(0), |version| { - utils::u64_from_bytes(&version) - .map_err(|_| Error::bad_database("Database version id is invalid.")) + utils::u64_from_bytes(&version).map_err(|_| { + Error::bad_database("Database version id is invalid.") + }) }) } diff --git a/src/database/key_value/key_backups.rs b/src/database/key_value/key_backups.rs index 900b700b..84140c93 100644 --- a/src/database/key_value/key_backups.rs +++ b/src/database/key_value/key_backups.rs @@ -9,7 +9,9 @@ use ruma::{ OwnedRoomId, RoomId, UserId, }; -use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; +use crate::{ + database::KeyValueDatabase, service, services, utils, Error, Result, +}; impl service::key_backups::Data for KeyValueDatabase { fn create_backup( @@ -20,12 +22,13 @@ impl service::key_backups::Data for KeyValueDatabase { let version = services().globals.next_count()?.to_string(); let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(version.as_bytes()); self.backupid_algorithm.insert( &key, - &serde_json::to_vec(backup_metadata).expect("BackupAlgorithm::to_vec always works"), + &serde_json::to_vec(backup_metadata) + .expect("BackupAlgorithm::to_vec always works"), )?; self.backupid_etag .insert(&key, &services().globals.next_count()?.to_be_bytes())?; @@ -34,13 +37,13 @@ impl service::key_backups::Data for KeyValueDatabase { fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> { let mut 
key = user_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(version.as_bytes()); self.backupid_algorithm.remove(&key)?; self.backupid_etag.remove(&key)?; - key.push(0xff); + key.push(0xFF); for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { self.backupkeyid_backup.remove(&outdated_key)?; @@ -56,7 +59,7 @@ impl service::key_backups::Data for KeyValueDatabase { backup_metadata: &Raw, ) -> Result { let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(version.as_bytes()); if self.backupid_algorithm.get(&key)?.is_none() { @@ -73,9 +76,12 @@ impl service::key_backups::Data for KeyValueDatabase { Ok(version.to_owned()) } - fn get_latest_backup_version(&self, user_id: &UserId) -> Result> { + fn get_latest_backup_version( + &self, + user_id: &UserId, + ) -> Result> { let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); let mut last_possible_key = prefix.clone(); last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); @@ -85,11 +91,13 @@ impl service::key_backups::Data for KeyValueDatabase { .next() .map(|(key, _)| { utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) + key.rsplit(|&b| b == 0xFF) .next() .expect("rsplit always returns an element"), ) - .map_err(|_| Error::bad_database("backupid_algorithm key is invalid.")) + .map_err(|_| { + Error::bad_database("backupid_algorithm key is invalid.") + }) }) .transpose() } @@ -99,7 +107,7 @@ impl service::key_backups::Data for KeyValueDatabase { user_id: &UserId, ) -> Result)>> { let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); let mut last_possible_key = prefix.clone(); last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); @@ -109,33 +117,42 @@ impl service::key_backups::Data for KeyValueDatabase { .next() .map(|(key, value)| { let version = utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) + key.rsplit(|&b| b == 0xFF) .next() 
.expect("rsplit always returns an element"), ) - .map_err(|_| Error::bad_database("backupid_algorithm key is invalid."))?; + .map_err(|_| { + Error::bad_database("backupid_algorithm key is invalid.") + })?; Ok(( version, serde_json::from_slice(&value).map_err(|_| { - Error::bad_database("Algorithm in backupid_algorithm is invalid.") + Error::bad_database( + "Algorithm in backupid_algorithm is invalid.", + ) })?, )) }) .transpose() } - fn get_backup(&self, user_id: &UserId, version: &str) -> Result>> { + fn get_backup( + &self, + user_id: &UserId, + version: &str, + ) -> Result>> { let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(version.as_bytes()); - self.backupid_algorithm - .get(&key)? - .map_or(Ok(None), |bytes| { - serde_json::from_slice(&bytes) - .map_err(|_| Error::bad_database("Algorithm in backupid_algorithm is invalid.")) + self.backupid_algorithm.get(&key)?.map_or(Ok(None), |bytes| { + serde_json::from_slice(&bytes).map_err(|_| { + Error::bad_database( + "Algorithm in backupid_algorithm is invalid.", + ) }) + }) } fn add_key( @@ -147,7 +164,7 @@ impl service::key_backups::Data for KeyValueDatabase { key_data: &Raw, ) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(version.as_bytes()); if self.backupid_algorithm.get(&key)?.is_none() { @@ -160,9 +177,9 @@ impl service::key_backups::Data for KeyValueDatabase { self.backupid_etag .insert(&key, &services().globals.next_count()?.to_be_bytes())?; - key.push(0xff); + key.push(0xFF); key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(session_id.as_bytes()); self.backupkeyid_backup @@ -173,7 +190,7 @@ impl service::key_backups::Data for KeyValueDatabase { fn count_keys(&self, user_id: &UserId, version: &str) -> Result { let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); 
prefix.extend_from_slice(version.as_bytes()); Ok(self.backupkeyid_backup.scan_prefix(prefix).count()) @@ -181,7 +198,7 @@ impl service::key_backups::Data for KeyValueDatabase { fn get_etag(&self, user_id: &UserId, version: &str) -> Result { let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(version.as_bytes()); Ok(utils::u64_from_bytes( @@ -200,40 +217,56 @@ impl service::key_backups::Data for KeyValueDatabase { version: &str, ) -> Result> { let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); prefix.extend_from_slice(version.as_bytes()); - prefix.push(0xff); + prefix.push(0xFF); let mut rooms = BTreeMap::::new(); - for result in self - .backupkeyid_backup - .scan_prefix(prefix) - .map(|(key, value)| { - let mut parts = key.rsplit(|&b| b == 0xff); + for result in + self.backupkeyid_backup.scan_prefix(prefix).map(|(key, value)| { + let mut parts = key.rsplit(|&b| b == 0xFF); - let session_id = - utils::string_from_bytes(parts.next().ok_or_else(|| { - Error::bad_database("backupkeyid_backup key is invalid.") - })?) - .map_err(|_| { - Error::bad_database("backupkeyid_backup session_id is invalid.") - })?; - - let room_id = RoomId::parse( - utils::string_from_bytes(parts.next().ok_or_else(|| { - Error::bad_database("backupkeyid_backup key is invalid.") - })?) 
- .map_err(|_| Error::bad_database("backupkeyid_backup room_id is invalid."))?, + let session_id = utils::string_from_bytes( + parts.next().ok_or_else(|| { + Error::bad_database( + "backupkeyid_backup key is invalid.", + ) + })?, ) .map_err(|_| { - Error::bad_database("backupkeyid_backup room_id is invalid room id.") + Error::bad_database( + "backupkeyid_backup session_id is invalid.", + ) })?; - let key_data = serde_json::from_slice(&value).map_err(|_| { - Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") + let room_id = RoomId::parse( + utils::string_from_bytes(parts.next().ok_or_else( + || { + Error::bad_database( + "backupkeyid_backup key is invalid.", + ) + }, + )?) + .map_err(|_| { + Error::bad_database( + "backupkeyid_backup room_id is invalid.", + ) + })?, + ) + .map_err(|_| { + Error::bad_database( + "backupkeyid_backup room_id is invalid room id.", + ) })?; + let key_data = + serde_json::from_slice(&value).map_err(|_| { + Error::bad_database( + "KeyBackupData in backupkeyid_backup is invalid.", + ) + })?; + Ok::<_, Error>((room_id, session_id, key_data)) }) { @@ -257,33 +290,41 @@ impl service::key_backups::Data for KeyValueDatabase { room_id: &RoomId, ) -> Result>> { let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); prefix.extend_from_slice(version.as_bytes()); - prefix.push(0xff); + prefix.push(0xFF); prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); + prefix.push(0xFF); Ok(self .backupkeyid_backup .scan_prefix(prefix) .map(|(key, value)| { - let mut parts = key.rsplit(|&b| b == 0xff); + let mut parts = key.rsplit(|&b| b == 0xFF); - let session_id = - utils::string_from_bytes(parts.next().ok_or_else(|| { - Error::bad_database("backupkeyid_backup key is invalid.") - })?) 
- .map_err(|_| { - Error::bad_database("backupkeyid_backup session_id is invalid.") - })?; - - let key_data = serde_json::from_slice(&value).map_err(|_| { - Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") + let session_id = utils::string_from_bytes( + parts.next().ok_or_else(|| { + Error::bad_database( + "backupkeyid_backup key is invalid.", + ) + })?, + ) + .map_err(|_| { + Error::bad_database( + "backupkeyid_backup session_id is invalid.", + ) })?; + let key_data = + serde_json::from_slice(&value).map_err(|_| { + Error::bad_database( + "KeyBackupData in backupkeyid_backup is invalid.", + ) + })?; + Ok::<_, Error>((session_id, key_data)) }) - .filter_map(|r| r.ok()) + .filter_map(Result::ok) .collect()) } @@ -295,18 +336,20 @@ impl service::key_backups::Data for KeyValueDatabase { session_id: &str, ) -> Result>> { let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(version.as_bytes()); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(session_id.as_bytes()); self.backupkeyid_backup .get(&key)? 
.map(|value| { serde_json::from_slice(&value).map_err(|_| { - Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") + Error::bad_database( + "KeyBackupData in backupkeyid_backup is invalid.", + ) }) }) .transpose() @@ -314,9 +357,9 @@ impl service::key_backups::Data for KeyValueDatabase { fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(version.as_bytes()); - key.push(0xff); + key.push(0xFF); for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { self.backupkeyid_backup.remove(&outdated_key)?; @@ -325,13 +368,18 @@ impl service::key_backups::Data for KeyValueDatabase { Ok(()) } - fn delete_room_keys(&self, user_id: &UserId, version: &str, room_id: &RoomId) -> Result<()> { + fn delete_room_keys( + &self, + user_id: &UserId, + version: &str, + room_id: &RoomId, + ) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(version.as_bytes()); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); + key.push(0xFF); for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { self.backupkeyid_backup.remove(&outdated_key)?; @@ -348,11 +396,11 @@ impl service::key_backups::Data for KeyValueDatabase { session_id: &str, ) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(version.as_bytes()); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(session_id.as_bytes()); for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { diff --git a/src/database/key_value/media.rs b/src/database/key_value/media.rs index 6abe5ba5..53e2146e 100644 --- a/src/database/key_value/media.rs +++ b/src/database/key_value/media.rs @@ -1,82 +1,355 @@ -use 
ruma::api::client::error::ErrorKind; +use ruma::OwnedMxcUri; -use crate::{database::KeyValueDatabase, service, utils, Error, Result}; +use crate::{ + database::KeyValueDatabase, + service::{ + self, + media::{FileMeta, MediaFileKey}, + }, + utils, Error, Result, +}; + +#[derive(Debug, Eq, PartialEq)] +struct MediaFileKeyParts { + mxc: OwnedMxcUri, + width: u32, + height: u32, + meta: FileMeta, +} + +impl TryFrom<&MediaFileKey> for MediaFileKeyParts { + type Error = Error; + + #[tracing::instrument( + err, + fields(key = utils::u8_slice_to_hex(key.as_bytes())), + )] + fn try_from(key: &MediaFileKey) -> Result { + // Extract parts + + // Extracting mxc url and thumbnail size is a bit fiddly, because the + // thumbnail size may contain 0xFF bytes. + let mut parts = key.as_bytes().splitn(2, |&b| b == 0xFF); + + let mxc_bytes = parts + .next() + .ok_or_else(|| Error::BadDatabase("Missing MXC URI bytes"))?; + + let rest = parts.next().ok_or_else(|| { + Error::BadDatabase("Missing thumbnail size bytes") + })?; + + // Thumbnail size is always 8 bytes + let (thumbnail_size_bytes, rest) = + rest.split_at_checked(8).ok_or_else(|| { + Error::BadDatabase("Missing thumbnail size bytes") + })?; + + // And always followed immediately by a 0xFF separator + let mut parts = rest.split(|&b| b == 0xFF); + + let thumbnail_size_rest = parts.next().ok_or_else(|| { + Error::BadDatabase("Missing Content-Disposition bytes") + })?; + if !thumbnail_size_rest.is_empty() { + return Err(Error::BadDatabase("Thumbnail size part is too long")); + } + + // The remaining parts are straightforward 0xFF-delimited fields + let content_disposition_bytes = parts.next().ok_or_else(|| { + Error::BadDatabase("Missing Content-Disposition bytes") + })?; + + let content_type_bytes = parts + .next() + .ok_or_else(|| Error::BadDatabase("Missing Content-Type bytes"))?; + + if parts.next().is_some() { + return Err(Error::BadDatabase("Too many parts")); + } + + // Parse parts + + let mxc = 
utils::string_from_bytes(mxc_bytes) + .map_err(|_| Error::BadDatabase("Invalid unicode in MXC URI"))? + .into(); + + let (width, height) = <&[u8; 8]>::try_from(thumbnail_size_bytes) + .map(|eight_bytes| { + let width = u32::from_be_bytes( + eight_bytes[..4].try_into().expect("should be 4 bytes"), + ); + let height = u32::from_be_bytes( + eight_bytes[4..].try_into().expect("should be 4 bytes"), + ); + + (width, height) + }) + .map_err(|_| { + Error::BadDatabase("Wrong number of thumbnail size bytes") + })?; + + let content_disposition = if content_disposition_bytes.is_empty() { + None + } else { + Some(utils::string_from_bytes(content_disposition_bytes).map_err( + |_| { + Error::BadDatabase("Invalid unicode in Content-Disposition") + }, + )?) + }; + + let content_type = if content_type_bytes.is_empty() { + None + } else { + Some(utils::string_from_bytes(content_type_bytes).map_err( + |_| Error::BadDatabase("Invalid unicode in Content-Type"), + )?) + }; + + Ok(MediaFileKeyParts { + mxc, + width, + height, + meta: FileMeta { + content_disposition, + content_type, + }, + }) + } +} impl service::media::Data for KeyValueDatabase { fn create_file_metadata( &self, - mxc: String, + mxc: OwnedMxcUri, width: u32, height: u32, - content_disposition: Option<&str>, - content_type: Option<&str>, - ) -> Result> { + meta: &FileMeta, + ) -> Result { let mut key = mxc.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(&width.to_be_bytes()); key.extend_from_slice(&height.to_be_bytes()); - key.push(0xff); + key.push(0xFF); key.extend_from_slice( - content_disposition + meta.content_disposition .as_ref() - .map(|f| f.as_bytes()) + .map(String::as_bytes) .unwrap_or_default(), ); - key.push(0xff); + key.push(0xFF); key.extend_from_slice( - content_type + meta.content_type .as_ref() - .map(|c| c.as_bytes()) + .map(String::as_bytes) .unwrap_or_default(), ); - self.mediaid_file.insert(&key, &[])?; + let key = MediaFileKey::new(key); + + 
self.mediaid_file.insert(key.as_bytes(), &[])?; Ok(key) } fn search_file_metadata( &self, - mxc: String, + mxc: OwnedMxcUri, width: u32, height: u32, - ) -> Result<(Option, Option, Vec)> { + ) -> Result> { let mut prefix = mxc.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); prefix.extend_from_slice(&width.to_be_bytes()); prefix.extend_from_slice(&height.to_be_bytes()); - prefix.push(0xff); + prefix.push(0xFF); - let (key, _) = self - .mediaid_file - .scan_prefix(prefix) - .next() - .ok_or(Error::BadRequest(ErrorKind::NotFound, "Media not found"))?; - - let mut parts = key.rsplit(|&b| b == 0xff); - - let content_type = parts - .next() - .map(|bytes| { - utils::string_from_bytes(bytes).map_err(|_| { - Error::bad_database("Content type in mediaid_file is invalid unicode.") - }) - }) - .transpose()?; - - let content_disposition_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; - - let content_disposition = if content_disposition_bytes.is_empty() { - None - } else { - Some( - utils::string_from_bytes(content_disposition_bytes).map_err(|_| { - Error::bad_database("Content Disposition in mediaid_file is invalid unicode.") - })?, - ) + let Some((key, _)) = self.mediaid_file.scan_prefix(prefix).next() + else { + return Ok(None); }; - Ok((content_disposition, content_type, key)) + + let key = MediaFileKey::new(key); + let parts = MediaFileKeyParts::try_from(&key)?; + Ok(Some((parts.meta, key))) + } + + fn delete_file_metadata(&self, key: MediaFileKey) -> Result<()> { + self.mediaid_file.remove(key.as_bytes()) + } + + fn search_thumbnails_metadata( + &self, + mxc: OwnedMxcUri, + ) -> Result> { + let mut prefix = mxc.as_bytes().to_vec(); + prefix.push(0xFF); + + self.mediaid_file + .scan_prefix(prefix) + .map(|(key, _)| { + let key = MediaFileKey::new(key); + let parts = MediaFileKeyParts::try_from(&key)?; + Ok((parts.meta, key)) + }) + .collect() + } + + fn all_file_metadata( + &self, + ) -> Box< + dyn Iterator> + '_, 
+ > { + Box::new( + self.mediaid_file + .iter() + .map(|(key, _)| { + let key = MediaFileKey::new(key); + + let parts = MediaFileKeyParts::try_from(&key)?; + Ok(Some((parts.mxc, parts.meta, key))) + }) + .filter_map(Result::transpose), + ) + } +} + +#[cfg(test)] +mod test { + use super::{FileMeta, MediaFileKey, MediaFileKeyParts}; + + #[test] + fn parse_key_basic() { + let mut key = b"mxc://example.com/someid".to_vec(); + key.push(0xFF); + key.extend_from_slice(&640_u32.to_be_bytes()); + key.extend_from_slice(&480_u32.to_be_bytes()); + key.push(0xFF); + key.extend_from_slice(b"inline"); + key.push(0xFF); + key.extend_from_slice(b"image/png"); + let key = MediaFileKey::new(key); + + assert_eq!( + MediaFileKeyParts::try_from(&key).unwrap(), + MediaFileKeyParts { + mxc: "mxc://example.com/someid".into(), + width: 640, + height: 480, + meta: FileMeta { + content_disposition: Some("inline".to_owned()), + content_type: Some("image/png".to_owned()), + } + } + ); + } + + #[test] + fn parse_key_no_content_type() { + let mut key = b"mxc://example.com/someid".to_vec(); + key.push(0xFF); + key.extend_from_slice(&640_u32.to_be_bytes()); + key.extend_from_slice(&480_u32.to_be_bytes()); + key.push(0xFF); + key.extend_from_slice(b"inline"); + key.push(0xFF); + // No content type + let key = MediaFileKey::new(key); + + assert_eq!( + MediaFileKeyParts::try_from(&key).unwrap(), + MediaFileKeyParts { + mxc: "mxc://example.com/someid".into(), + width: 640, + height: 480, + meta: FileMeta { + content_disposition: Some("inline".to_owned()), + content_type: None, + } + } + ); + } + + #[test] + fn parse_key_no_content_disposition() { + let mut key = b"mxc://example.com/someid".to_vec(); + key.push(0xFF); + key.extend_from_slice(&640_u32.to_be_bytes()); + key.extend_from_slice(&480_u32.to_be_bytes()); + key.push(0xFF); + // No content disposition + key.push(0xFF); + key.extend_from_slice(b"image/png"); + let key = MediaFileKey::new(key); + + assert_eq!( + 
MediaFileKeyParts::try_from(&key).unwrap(), + MediaFileKeyParts { + mxc: "mxc://example.com/someid".into(), + width: 640, + height: 480, + meta: FileMeta { + content_disposition: None, + content_type: Some("image/png".to_owned()), + } + } + ); + } + + #[test] + fn parse_key_no_content_disposition_or_type() { + let mut key = b"mxc://example.com/someid".to_vec(); + key.push(0xFF); + key.extend_from_slice(&640_u32.to_be_bytes()); + key.extend_from_slice(&480_u32.to_be_bytes()); + key.push(0xFF); + // No content disposition + key.push(0xFF); + // No content type + let key = MediaFileKey::new(key); + + assert_eq!( + MediaFileKeyParts::try_from(&key).unwrap(), + MediaFileKeyParts { + mxc: "mxc://example.com/someid".into(), + width: 640, + height: 480, + meta: FileMeta { + content_disposition: None, + content_type: None, + } + } + ); + } + + // Our current media service code has an allowlist of thumbnail dimensions, + // and so we don't expect to create new thumbnails with dimensions + // containing a 0xFF byte. Thumbnails with a 0xFF in the dimensions may + // have been created previously, so we need to be able to read them. 
+ #[test] + fn parse_key_separator_in_thumbnail_dims() { + let mut key = b"mxc://example.com/someid".to_vec(); + key.push(0xFF); + key.extend_from_slice(&[0x0, 0x0, 0xFF, 0xFF]); + key.extend_from_slice(&[0x0, 0x0, 0x10, 0xFF]); + key.push(0xFF); + key.extend_from_slice(b"inline"); + key.push(0xFF); + key.extend_from_slice(b"image/png"); + let key = MediaFileKey::new(key); + + assert_eq!( + MediaFileKeyParts::try_from(&key).unwrap(), + MediaFileKeyParts { + mxc: "mxc://example.com/someid".into(), + width: 0xFFFF, + height: 0x10FF, + meta: FileMeta { + content_disposition: Some("inline".to_owned()), + content_type: Some("image/png".to_owned()), + } + } + ); } } diff --git a/src/database/key_value/pusher.rs b/src/database/key_value/pusher.rs index 50a6faca..000c9a5d 100644 --- a/src/database/key_value/pusher.rs +++ b/src/database/key_value/pusher.rs @@ -6,33 +6,39 @@ use ruma::{ use crate::{database::KeyValueDatabase, service, utils, Error, Result}; impl service::pusher::Data for KeyValueDatabase { - fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::PusherAction) -> Result<()> { + fn set_pusher( + &self, + sender: &UserId, + pusher: set_pusher::v3::PusherAction, + ) -> Result<()> { match &pusher { set_pusher::v3::PusherAction::Post(data) => { let mut key = sender.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(data.pusher.ids.pushkey.as_bytes()); self.senderkey_pusher.insert( &key, - &serde_json::to_vec(&pusher).expect("Pusher is valid JSON value"), + &serde_json::to_vec(&pusher) + .expect("Pusher is valid JSON value"), )?; Ok(()) } set_pusher::v3::PusherAction::Delete(ids) => { let mut key = sender.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(ids.pushkey.as_bytes()); - self.senderkey_pusher - .remove(&key) - .map(|_| ()) - .map_err(Into::into) + self.senderkey_pusher.remove(&key) } } } - fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result> { + fn get_pusher( + &self, + sender: 
&UserId, + pushkey: &str, + ) -> Result> { let mut senderkey = sender.as_bytes().to_vec(); - senderkey.push(0xff); + senderkey.push(0xFF); senderkey.extend_from_slice(pushkey.as_bytes()); self.senderkey_pusher @@ -46,7 +52,7 @@ impl service::pusher::Data for KeyValueDatabase { fn get_pushers(&self, sender: &UserId) -> Result> { let mut prefix = sender.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); self.senderkey_pusher .scan_prefix(prefix) @@ -62,16 +68,20 @@ impl service::pusher::Data for KeyValueDatabase { sender: &UserId, ) -> Box> + 'a> { let mut prefix = sender.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); Box::new(self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| { - let mut parts = k.splitn(2, |&b| b == 0xff); + let mut parts = k.splitn(2, |&b| b == 0xFF); let _senderkey = parts.next(); - let push_key = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid senderkey_pusher in db"))?; - let push_key_string = utils::string_from_bytes(push_key) - .map_err(|_| Error::bad_database("Invalid pusher bytes in senderkey_pusher"))?; + let push_key = parts.next().ok_or_else(|| { + Error::bad_database("Invalid senderkey_pusher in db") + })?; + let push_key_string = + utils::string_from_bytes(push_key).map_err(|_| { + Error::bad_database( + "Invalid pusher bytes in senderkey_pusher", + ) + })?; Ok(push_key_string) })) diff --git a/src/database/key_value/rooms/mod.rs b/src/database/key_value/rooms.rs similarity index 100% rename from src/database/key_value/rooms/mod.rs rename to src/database/key_value/rooms.rs index e7b53d30..825dd1de 100644 --- a/src/database/key_value/rooms/mod.rs +++ b/src/database/key_value/rooms.rs @@ -1,3 +1,5 @@ +use crate::{database::KeyValueDatabase, service}; + mod alias; mod auth_chain; mod directory; @@ -16,6 +18,4 @@ mod threads; mod timeline; mod user; -use crate::{database::KeyValueDatabase, service}; - impl service::rooms::Data for KeyValueDatabase {} diff --git 
a/src/database/key_value/rooms/alias.rs b/src/database/key_value/rooms/alias.rs index 6f230323..a1355ed2 100644 --- a/src/database/key_value/rooms/alias.rs +++ b/src/database/key_value/rooms/alias.rs @@ -1,22 +1,32 @@ -use ruma::{api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId}; +use ruma::{ + api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, + RoomId, +}; -use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; +use crate::{ + database::KeyValueDatabase, service, services, utils, Error, Result, +}; impl service::rooms::alias::Data for KeyValueDatabase { + #[tracing::instrument(skip(self))] fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()> { self.alias_roomid .insert(alias.alias().as_bytes(), room_id.as_bytes())?; let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); + aliasid.push(0xFF); + aliasid + .extend_from_slice(&services().globals.next_count()?.to_be_bytes()); self.aliasid_alias.insert(&aliasid, alias.as_bytes())?; Ok(()) } + #[tracing::instrument(skip(self))] fn remove_alias(&self, alias: &RoomAliasId) -> Result<()> { - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); + if let Some(room_id) = + self.alias_roomid.get(alias.alias().as_bytes())? + { + let mut prefix = room_id.clone(); + prefix.push(0xFF); for (key, _) in self.aliasid_alias.scan_prefix(prefix) { self.aliasid_alias.remove(&key)?; @@ -31,30 +41,45 @@ impl service::rooms::alias::Data for KeyValueDatabase { Ok(()) } - fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result> { + #[tracing::instrument(skip(self))] + fn resolve_local_alias( + &self, + alias: &RoomAliasId, + ) -> Result> { self.alias_roomid .get(alias.alias().as_bytes())? 
.map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) + RoomId::parse(utils::string_from_bytes(&bytes).map_err( + |_| { + Error::bad_database( + "Room ID in alias_roomid is invalid unicode.", + ) + }, + )?) + .map_err(|_| { + Error::bad_database("Room ID in alias_roomid is invalid.") + }) }) .transpose() } + #[tracing::instrument(skip(self))] fn local_aliases_for_room<'a>( &'a self, room_id: &RoomId, ) -> Box> + 'a> { let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); Box::new(self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? + .map_err(|_| { + Error::bad_database("Invalid alias bytes in aliasid_alias.") + })? .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) + .map_err(|_| { + Error::bad_database("Invalid alias in aliasid_alias.") + }) })) } } diff --git a/src/database/key_value/rooms/auth_chain.rs b/src/database/key_value/rooms/auth_chain.rs index 60057ac1..b736e524 100644 --- a/src/database/key_value/rooms/auth_chain.rs +++ b/src/database/key_value/rooms/auth_chain.rs @@ -1,61 +1,56 @@ -use std::{collections::HashSet, mem::size_of, sync::Arc}; +use std::{collections::HashSet, mem::size_of}; -use crate::{database::KeyValueDatabase, service, utils, Result}; +use crate::{ + database::KeyValueDatabase, + service::{self, rooms::short::ShortEventId}, + utils, Result, +}; impl service::rooms::auth_chain::Data for KeyValueDatabase { - fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - + #[tracing::instrument(skip(self, key))] + fn 
get_cached_eventid_authchain( + &self, + key: &[ShortEventId], + ) -> Result>> { // We only save auth chains for single events in the db if key.len() == 1 { - // Check DB cache let chain = self .shorteventid_authchain - .get(&key[0].to_be_bytes())? + .get(&key[0].get().to_be_bytes())? .map(|chain| { chain .chunks_exact(size_of::()) - .map(|chunk| utils::u64_from_bytes(chunk).expect("byte length is correct")) + .map(|chunk| { + ShortEventId::new( + utils::u64_from_bytes(chunk) + .expect("byte length is correct"), + ) + }) .collect() }); - if let Some(chain) = chain { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } + return Ok(chain); } Ok(None) } - fn cache_auth_chain(&self, key: Vec, auth_chain: Arc>) -> Result<()> { + fn cache_auth_chain( + &self, + key: &[ShortEventId], + auth_chain: &HashSet, + ) -> Result<()> { // Only persist single events in db if key.len() == 1 { self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), + &key[0].get().to_be_bytes(), &auth_chain .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) + .flat_map(|s| s.get().to_be_bytes().to_vec()) .collect::>(), )?; } - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(key, auth_chain); - Ok(()) } } diff --git a/src/database/key_value/rooms/directory.rs b/src/database/key_value/rooms/directory.rs index e05dee82..58964927 100644 --- a/src/database/key_value/rooms/directory.rs +++ b/src/database/key_value/rooms/directory.rs @@ -3,26 +3,34 @@ use ruma::{OwnedRoomId, RoomId}; use crate::{database::KeyValueDatabase, service, utils, Error, Result}; impl service::rooms::directory::Data for KeyValueDatabase { + #[tracing::instrument(skip(self))] fn set_public(&self, room_id: &RoomId) -> Result<()> { self.publicroomids.insert(room_id.as_bytes(), &[]) } + #[tracing::instrument(skip(self))] fn set_not_public(&self, room_id: &RoomId) -> Result<()> { 
self.publicroomids.remove(room_id.as_bytes()) } + #[tracing::instrument(skip(self))] fn is_public_room(&self, room_id: &RoomId) -> Result { Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) } - fn public_rooms<'a>(&'a self) -> Box> + 'a> { + #[tracing::instrument(skip(self))] + fn public_rooms<'a>( + &'a self, + ) -> Box> + 'a> { Box::new(self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) + RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database( + "Room ID in publicroomids is invalid unicode.", + ) + })?) + .map_err(|_| { + Error::bad_database("Room ID in publicroomids is invalid.") + }) })) } } diff --git a/src/database/key_value/rooms/edus/mod.rs b/src/database/key_value/rooms/edus.rs similarity index 90% rename from src/database/key_value/rooms/edus/mod.rs rename to src/database/key_value/rooms/edus.rs index 7abf946f..bf25ee37 100644 --- a/src/database/key_value/rooms/edus/mod.rs +++ b/src/database/key_value/rooms/edus.rs @@ -1,6 +1,5 @@ -mod presence; -mod read_receipt; - use crate::{database::KeyValueDatabase, service}; +mod read_receipt; + impl service::rooms::edus::Data for KeyValueDatabase {} diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs deleted file mode 100644 index 904b1c44..00000000 --- a/src/database/key_value/rooms/edus/presence.rs +++ /dev/null @@ -1,152 +0,0 @@ -use std::collections::HashMap; - -use ruma::{ - events::presence::PresenceEvent, presence::PresenceState, OwnedUserId, RoomId, UInt, UserId, -}; - -use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; - -impl service::rooms::edus::presence::Data for KeyValueDatabase { - fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: 
PresenceEvent, - ) -> Result<()> { - // TODO: Remove old entry? Or maybe just wipe completely from time to time? - - let count = services().globals.next_count()?.to_be_bytes(); - - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(presence.sender.as_bytes()); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"), - )?; - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn last_presence_update(&self, user_id: &UserId) -> Result> { - self.userid_lastpresenceupdate - .get(user_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - }) - .transpose() - } - - fn get_presence_event( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result> { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count.to_be_bytes()); - presence_id.push(0xff); - presence_id.extend_from_slice(user_id.as_bytes()); - - self.presenceid_presence - .get(&presence_id)? 
- .map(|value| parse_presence_event(&value)) - .transpose() - } - - fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - let mut hashmap = HashMap::new(); - - for (key, value) in self - .presenceid_presence - .iter_from(&first_possible_edu, false) - .take_while(|(key, _)| key.starts_with(&prefix)) - { - let user_id = UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, - ) - .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; - - let presence = parse_presence_event(&value)?; - - hashmap.insert(user_id, presence); - } - - Ok(hashmap) - } - - /* - fn presence_maintain(&self, db: Arc>) { - // TODO @M0dEx: move this to a timed tasks module - tokio::spawn(async move { - loop { - select! { - Some(user_id) = self.presence_timers.next() { - // TODO @M0dEx: would it be better to acquire the lock outside the loop? 
- let guard = db.read().await; - - // TODO @M0dEx: add self.presence_timers - // TODO @M0dEx: maintain presence - } - } - } - }); - } - */ -} - -fn parse_presence_event(bytes: &[u8]) -> Result { - let mut presence: PresenceEvent = serde_json::from_slice(bytes) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } - - Ok(presence) -} diff --git a/src/database/key_value/rooms/edus/read_receipt.rs b/src/database/key_value/rooms/edus/read_receipt.rs index fa97ea34..f8e1470f 100644 --- a/src/database/key_value/rooms/edus/read_receipt.rs +++ b/src/database/key_value/rooms/edus/read_receipt.rs @@ -1,12 +1,14 @@ -use std::mem; - use ruma::{ - events::receipt::ReceiptEvent, serde::Raw, CanonicalJsonObject, OwnedUserId, RoomId, UserId, + events::receipt::ReceiptEvent, serde::Raw, CanonicalJsonObject, + OwnedUserId, RoomId, UserId, }; -use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; +use crate::{ + database::KeyValueDatabase, service, services, utils, Error, Result, +}; impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { + #[tracing::instrument(skip(self, event))] fn readreceipt_update( &self, user_id: &UserId, @@ -14,7 +16,7 @@ impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { event: ReceiptEvent, ) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); let mut last_possible_key = prefix.clone(); last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); @@ -25,7 +27,7 @@ impl 
service::rooms::edus::read_receipt::Data for KeyValueDatabase { .iter_from(&last_possible_key, true) .take_while(|(key, _)| key.starts_with(&prefix)) .find(|(key, _)| { - key.rsplit(|&b| b == 0xff) + key.rsplit(|&b| b == 0xFF) .next() .expect("rsplit always returns an element") == user_id.as_bytes() @@ -36,18 +38,22 @@ impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { } let mut room_latest_id = prefix; - room_latest_id.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); - room_latest_id.push(0xff); + room_latest_id + .extend_from_slice(&services().globals.next_count()?.to_be_bytes()); + room_latest_id.push(0xFF); room_latest_id.extend_from_slice(user_id.as_bytes()); self.readreceiptid_readreceipt.insert( &room_latest_id, - &serde_json::to_vec(&event).expect("EduEvent::to_string always works"), + &serde_json::to_vec(&event) + .expect("EduEvent::to_string always works"), )?; Ok(()) } + #[tracing::instrument(skip(self))] + #[allow(clippy::type_complexity)] fn readreceipts_since<'a>( &'a self, room_id: &RoomId, @@ -62,11 +68,12 @@ impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { > + 'a, > { let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); let prefix2 = prefix.clone(); let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since + // +1 so we don't send the event at since + first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); Box::new( self.readreceiptid_readreceipt @@ -74,23 +81,37 @@ impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { .take_while(move |(k, _)| k.starts_with(&prefix2)) .map(move |(k, v)| { let count = utils::u64_from_bytes( - &k[prefix.len()..prefix.len() + mem::size_of::()], + &k[prefix.len()..prefix.len() + size_of::()], ) - .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; + .map_err(|_| { + Error::bad_database( 
+ "Invalid readreceiptid count in db.", + ) + })?; let user_id = UserId::parse( - utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) - .map_err(|_| { - Error::bad_database("Invalid readreceiptid userid bytes in db.") - })?, + utils::string_from_bytes( + &k[prefix.len() + size_of::() + 1..], + ) + .map_err(|_| { + Error::bad_database( + "Invalid readreceiptid userid bytes in db.", + ) + })?, ) - .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; + .map_err(|_| { + Error::bad_database( + "Invalid readreceiptid userid in db.", + ) + })?; let mut json = - serde_json::from_slice::(&v).map_err(|_| { - Error::bad_database( - "Read receipt in roomlatestid_roomlatest is invalid json.", - ) - })?; + serde_json::from_slice::(&v) + .map_err(|_| { + Error::bad_database( + "Read receipt in roomlatestid_roomlatest \ + is invalid json.", + ) + })?; json.remove("room_id"); Ok(( @@ -105,35 +126,47 @@ impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { ) } - fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()> { + #[tracing::instrument(skip(self))] + fn private_read_set( + &self, + room_id: &RoomId, + user_id: &UserId, + count: u64, + ) -> Result<()> { let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(user_id.as_bytes()); - self.roomuserid_privateread - .insert(&key, &count.to_be_bytes())?; + self.roomuserid_privateread.insert(&key, &count.to_be_bytes())?; self.roomuserid_lastprivatereadupdate .insert(&key, &services().globals.next_count()?.to_be_bytes()) } - fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + #[tracing::instrument(skip(self))] + fn private_read_get( + &self, + room_id: &RoomId, + user_id: &UserId, + ) -> Result> { let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(user_id.as_bytes()); - self.roomuserid_privateread - .get(&key)? 
- .map_or(Ok(None), |v| { - Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { - Error::bad_database("Invalid private read marker bytes") - })?)) - }) + self.roomuserid_privateread.get(&key)?.map_or(Ok(None), |v| { + Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { + Error::bad_database("Invalid private read marker bytes") + })?)) + }) } - fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { + fn last_privateread_update( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result { let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(user_id.as_bytes()); Ok(self @@ -141,7 +174,9 @@ impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { .get(&key)? .map(|bytes| { utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") + Error::bad_database( + "Count in roomuserid_lastprivatereadupdate is invalid.", + ) }) }) .transpose()? diff --git a/src/database/key_value/rooms/lazy_load.rs b/src/database/key_value/rooms/lazy_load.rs index a19d52cb..01952379 100644 --- a/src/database/key_value/rooms/lazy_load.rs +++ b/src/database/key_value/rooms/lazy_load.rs @@ -11,11 +11,11 @@ impl service::rooms::lazy_loading::Data for KeyValueDatabase { ll_user: &UserId, ) -> Result { let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(ll_user.as_bytes()); Ok(self.lazyloadedids.get(&key)?.is_some()) } @@ -28,11 +28,11 @@ impl service::rooms::lazy_loading::Data for KeyValueDatabase { confirmed_user_ids: &mut dyn Iterator, ) -> Result<()> { let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); + prefix.push(0xFF); 
prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); + prefix.push(0xFF); for ll_id in confirmed_user_ids { let mut key = prefix.clone(); @@ -50,11 +50,11 @@ impl service::rooms::lazy_loading::Data for KeyValueDatabase { room_id: &RoomId, ) -> Result<()> { let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); + prefix.push(0xFF); prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); + prefix.push(0xFF); for (key, _) in self.lazyloadedids.scan_prefix(prefix) { self.lazyloadedids.remove(&key)?; diff --git a/src/database/key_value/rooms/metadata.rs b/src/database/key_value/rooms/metadata.rs index 57540c40..dc8c7a19 100644 --- a/src/database/key_value/rooms/metadata.rs +++ b/src/database/key_value/rooms/metadata.rs @@ -1,11 +1,14 @@ use ruma::{OwnedRoomId, RoomId}; -use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; +use crate::{ + database::KeyValueDatabase, service, services, utils, Error, Result, +}; impl service::rooms::metadata::Data for KeyValueDatabase { + #[tracing::instrument(skip(self))] fn exists(&self, room_id: &RoomId) -> Result { let prefix = match services().rooms.short.get_shortroomid(room_id)? 
{ - Some(b) => b.to_be_bytes().to_vec(), + Some(b) => b.get().to_be_bytes().to_vec(), None => return Ok(false), }; @@ -18,14 +21,18 @@ impl service::rooms::metadata::Data for KeyValueDatabase { .is_some()) } - fn iter_ids<'a>(&'a self) -> Box> + 'a> { + fn iter_ids<'a>( + &'a self, + ) -> Box> + 'a> { Box::new(self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) + RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database( + "Room ID in publicroomids is invalid unicode.", + ) + })?) + .map_err(|_| { + Error::bad_database("Room ID in roomid_shortroomid is invalid.") + }) })) } diff --git a/src/database/key_value/rooms/outlier.rs b/src/database/key_value/rooms/outlier.rs index 7985ba81..562331ef 100644 --- a/src/database/key_value/rooms/outlier.rs +++ b/src/database/key_value/rooms/outlier.rs @@ -1,25 +1,27 @@ use ruma::{CanonicalJsonObject, EventId}; -use crate::{database::KeyValueDatabase, service, Error, PduEvent, Result}; +use crate::{database::KeyValueDatabase, service, Error, Result}; impl service::rooms::outlier::Data for KeyValueDatabase { - fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) + fn get_outlier_pdu_json( + &self, + event_id: &EventId, + ) -> Result> { + self.eventid_outlierpdu.get(event_id.as_bytes())?.map_or( + Ok(None), + |pdu| { + serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db.")) + }, + ) } - fn get_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? 
- .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { + #[tracing::instrument(skip(self, pdu))] + fn add_pdu_outlier( + &self, + event_id: &EventId, + pdu: &CanonicalJsonObject, + ) -> Result<()> { self.eventid_outlierpdu.insert( event_id.as_bytes(), &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), diff --git a/src/database/key_value/rooms/pdu_metadata.rs b/src/database/key_value/rooms/pdu_metadata.rs index 0641f9d8..2376e334 100644 --- a/src/database/key_value/rooms/pdu_metadata.rs +++ b/src/database/key_value/rooms/pdu_metadata.rs @@ -1,10 +1,16 @@ -use std::{mem, sync::Arc}; +use std::sync::Arc; use ruma::{EventId, RoomId, UserId}; use crate::{ database::KeyValueDatabase, - service::{self, rooms::timeline::PduCount}, + service::{ + self, + rooms::{ + short::ShortRoomId, + timeline::{PduCount, PduId}, + }, + }, services, utils, Error, PduEvent, Result, }; @@ -19,10 +25,11 @@ impl service::rooms::pdu_metadata::Data for KeyValueDatabase { fn relations_until<'a>( &'a self, user_id: &'a UserId, - shortroomid: u64, + shortroomid: ShortRoomId, target: u64, until: PduCount, - ) -> Result> + 'a>> { + ) -> Result> + 'a>> + { let prefix = target.to_be_bytes().to_vec(); let mut current = prefix.clone(); @@ -40,17 +47,27 @@ impl service::rooms::pdu_metadata::Data for KeyValueDatabase { .iter_from(¤t, true) .take_while(move |(k, _)| k.starts_with(&prefix)) .map(move |(tofrom, _data)| { - let from = utils::u64_from_bytes(&tofrom[(mem::size_of::())..]) - .map_err(|_| Error::bad_database("Invalid count in tofrom_relation."))?; + let from = + utils::u64_from_bytes(&tofrom[(size_of::())..]) + .map_err(|_| { + Error::bad_database( + "Invalid count in tofrom_relation.", + ) + })?; - let mut pduid = shortroomid.to_be_bytes().to_vec(); + let mut pduid = shortroomid.get().to_be_bytes().to_vec(); 
pduid.extend_from_slice(&from.to_be_bytes()); + let pduid = PduId::new(pduid); let mut pdu = services() .rooms .timeline .get_pdu_from_id(&pduid)? - .ok_or_else(|| Error::bad_database("Pdu in tofrom_relation is invalid."))?; + .ok_or_else(|| { + Error::bad_database( + "Pdu in tofrom_relation is invalid.", + ) + })?; if pdu.sender != user_id { pdu.remove_transaction_id()?; } @@ -59,7 +76,11 @@ impl service::rooms::pdu_metadata::Data for KeyValueDatabase { )) } - fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { + fn mark_as_referenced( + &self, + room_id: &RoomId, + event_ids: &[Arc], + ) -> Result<()> { for prev in event_ids { let mut key = room_id.as_bytes().to_vec(); key.extend_from_slice(prev.as_bytes()); @@ -69,7 +90,11 @@ impl service::rooms::pdu_metadata::Data for KeyValueDatabase { Ok(()) } - fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { + fn is_event_referenced( + &self, + room_id: &RoomId, + event_id: &EventId, + ) -> Result { let mut key = room_id.as_bytes().to_vec(); key.extend_from_slice(event_id.as_bytes()); Ok(self.referencedevents.get(&key)?.is_some()) @@ -80,8 +105,6 @@ impl service::rooms::pdu_metadata::Data for KeyValueDatabase { } fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) + self.softfailedeventids.get(event_id.as_bytes()).map(|o| o.is_some()) } } diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs index ad573f06..510feca5 100644 --- a/src/database/key_value/rooms/search.rs +++ b/src/database/key_value/rooms/search.rs @@ -1,65 +1,110 @@ use ruma::RoomId; -use crate::{database::KeyValueDatabase, service, services, utils, Result}; +use crate::{ + database::KeyValueDatabase, + service::{ + self, + rooms::{short::ShortRoomId, timeline::PduId}, + }, + services, utils, Result, +}; + +/// Splits a string into tokens used as keys in the search inverted 
index +/// +/// This may be used to tokenize both message bodies (for indexing) or search +/// queries (for querying). +fn tokenize(body: &str) -> impl Iterator + '_ { + body.split_terminator(|c: char| !c.is_alphanumeric()) + .filter(|s| !s.is_empty()) + .filter(|word| word.len() <= 50) + .map(str::to_lowercase) +} impl service::rooms::search::Data for KeyValueDatabase { - fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> { - let mut batch = message_body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(pdu_id); // TODO: currently we save the room id a second time here - (key, Vec::new()) - }); + #[tracing::instrument(skip(self))] + fn index_pdu( + &self, + shortroomid: ShortRoomId, + pdu_id: &PduId, + message_body: &str, + ) -> Result<()> { + let mut batch = tokenize(message_body).map(|word| { + let mut key = shortroomid.get().to_be_bytes().to_vec(); + key.extend_from_slice(word.as_bytes()); + key.push(0xFF); + // TODO: currently we save the room id a second time here + key.extend_from_slice(pdu_id.as_bytes()); + (key, Vec::new()) + }); self.tokenids.insert_batch(&mut batch) } + #[tracing::instrument(skip(self))] + fn deindex_pdu( + &self, + shortroomid: ShortRoomId, + pdu_id: &PduId, + message_body: &str, + ) -> Result<()> { + let batch = tokenize(message_body).map(|word| { + let mut key = shortroomid.get().to_be_bytes().to_vec(); + key.extend_from_slice(word.as_bytes()); + key.push(0xFF); + // TODO: currently we save the room id a second time here + key.extend_from_slice(pdu_id.as_bytes()); + key + }); + + for token in batch { + self.tokenids.remove(&token)?; + } + + Ok(()) + } + + #[tracing::instrument(skip(self))] + #[allow(clippy::type_complexity)] fn search_pdus<'a>( &'a self, 
room_id: &RoomId, search_string: &str, - ) -> Result> + 'a>, Vec)>> { + ) -> Result + 'a>, Vec)>> + { let prefix = services() .rooms .short .get_shortroomid(room_id)? .expect("room exists") + .get() .to_be_bytes() .to_vec(); - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); + let words: Vec<_> = tokenize(search_string).collect(); let iterators = words.clone().into_iter().map(move |word| { let mut prefix2 = prefix.clone(); prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); + prefix2.push(0xFF); let prefix3 = prefix2.clone(); let mut last_possible_id = prefix2.clone(); last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first + // Newest pdus first + .iter_from(&last_possible_id, true) .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(key, _)| key[prefix3.len()..].to_vec()) + .map(move |(key, _)| PduId::new(key[prefix3.len()..].to_vec())) }); - let common_elements = match utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) { - Some(it) => it, - None => return Ok(None), + // We compare b with a because we reversed the iterator earlier + let Some(common_elements) = + utils::common_elements(iterators, |a, b| { + b.as_bytes().cmp(a.as_bytes()) + }) + else { + return Ok(None); }; Ok(Some((Box::new(common_elements), words))) diff --git a/src/database/key_value/rooms/short.rs b/src/database/key_value/rooms/short.rs index 98cfa48a..ee137733 100644 --- a/src/database/key_value/rooms/short.rs +++ b/src/database/key_value/rooms/short.rs @@ -2,216 +2,220 @@ use std::sync::Arc; use ruma::{events::StateEventType, EventId, RoomId}; -use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; +use crate::{ + database::KeyValueDatabase, + service::{ + self, + rooms::short::{ + 
ShortEventId, ShortRoomId, ShortStateHash, ShortStateKey, + }, + }, + services, utils, Error, Result, +}; impl service::rooms::short::Data for KeyValueDatabase { - fn get_or_create_shorteventid(&self, event_id: &EventId) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } + #[tracing::instrument(skip(self))] + fn get_or_create_shorteventid( + &self, + event_id: &EventId, + ) -> Result<(ShortEventId, bool)> { + let (short, created) = if let Some(shorteventid) = + self.eventid_shorteventid.get(event_id.as_bytes())? + { + let shorteventid = + utils::u64_from_bytes(&shorteventid).map_err(|_| { + Error::bad_database("Invalid shorteventid in db.") + })?; - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = services().globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } + (shorteventid, false) + } else { + let shorteventid = services().globals.next_count()?; + self.eventid_shorteventid + .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; + self.shorteventid_eventid + .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; + (shorteventid, true) }; - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) + Ok((ShortEventId::new(short), created)) } + #[tracing::instrument(skip(self), fields(cache_result))] fn get_shortstatekey( &self, event_type: &StateEventType, state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = 
event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); + ) -> Result> { + let mut db_key = event_type.to_string().as_bytes().to_vec(); + db_key.push(0xFF); + db_key.extend_from_slice(state_key.as_bytes()); let short = self .statekey_shortstatekey - .get(&statekey)? + .get(&db_key)? .map(|shortstatekey| { utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) + .map_err(|_| { + Error::bad_database("Invalid shortstatekey in db.") + }) + .map(ShortStateKey::new) }) .transpose()?; - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - Ok(short) } + #[tracing::instrument(skip(self))] fn get_or_create_shortstatekey( &self, event_type: &StateEventType, state_key: &str, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) + ) -> Result<(ShortStateKey, bool)> { + let mut db_key = event_type.to_string().as_bytes().to_vec(); + db_key.push(0xFF); + db_key.extend_from_slice(state_key.as_bytes()); + + let (short, created) = if let Some(shortstatekey) = + self.statekey_shortstatekey.get(&db_key)? { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? 
{ - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = services().globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } + ( + utils::u64_from_bytes(&shortstatekey).map_err(|_| { + Error::bad_database("Invalid shortstatekey in db.") + })?, + false, + ) + } else { + let shortstatekey = services().globals.next_count()?; + self.statekey_shortstatekey + .insert(&db_key, &shortstatekey.to_be_bytes())?; + self.shortstatekey_statekey + .insert(&shortstatekey.to_be_bytes(), &db_key)?; + (shortstatekey, true) }; - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); + let short = ShortStateKey::new(short); - Ok(short) + Ok((short, created)) } - fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - + #[tracing::instrument(skip(self))] + fn get_eventid_from_short( + &self, + shorteventid: ShortEventId, + ) -> Result> { let bytes = self .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; + .get(&shorteventid.get().to_be_bytes())? + .ok_or_else(|| { + Error::bad_database("Shorteventid does not exist") + })?; - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); + let event_id = EventId::parse_arc( + utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database( + "EventID in shorteventid_eventid is invalid unicode.", + ) + })?, + ) + .map_err(|_| { + Error::bad_database("EventId in shorteventid_eventid is invalid.") + })?; Ok(event_id) } - fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - + #[tracing::instrument(skip(self))] + fn get_statekey_from_short( + &self, + shortstatekey: ShortStateKey, + ) -> Result<(StateEventType, String)> { let bytes = self .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; + .get(&shortstatekey.get().to_be_bytes())? 
+ .ok_or_else(|| { + Error::bad_database("Shortstatekey does not exist") + })?; - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?); - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") + let mut parts = bytes.splitn(2, |&b| b == 0xFF); + let eventtype_bytes = + parts.next().expect("split always returns one entry"); + let statekey_bytes = parts.next().ok_or_else(|| { + Error::bad_database("Invalid statekey in shortstatekey_statekey.") })?; - let result = (event_type, state_key); + let event_type = StateEventType::from( + utils::string_from_bytes(eventtype_bytes).map_err(|_| { + Error::bad_database( + "Event type in shortstatekey_statekey is invalid unicode.", + ) + })?, + ); - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); + let state_key = + utils::string_from_bytes(statekey_bytes).map_err(|_| { + Error::bad_database( + "Statekey in shortstatekey_statekey is invalid unicode.", + ) + })?; - Ok(result) + Ok((event_type, state_key)) } - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? 
{ - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, + /// Returns `(shortstatehash, already_existed)` + #[tracing::instrument(skip(self))] + fn get_or_create_shortstatehash( + &self, + state_hash: &[u8], + ) -> Result<(ShortStateHash, bool)> { + let (short, existed) = if let Some(shortstatehash) = + self.statehash_shortstatehash.get(state_hash)? + { + ( + utils::u64_from_bytes(&shortstatehash).map_err(|_| { + Error::bad_database("Invalid shortstatehash in db.") + })?, true, - ), - None => { - let shortstatehash = services().globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) + ) + } else { + let shortstatehash = services().globals.next_count()?; + self.statehash_shortstatehash + .insert(state_hash, &shortstatehash.to_be_bytes())?; + (shortstatehash, false) + }; + + Ok((ShortStateHash::new(short), existed)) } - fn get_shortroomid(&self, room_id: &RoomId) -> Result> { + fn get_shortroomid(&self, room_id: &RoomId) -> Result> { self.roomid_shortroomid .get(room_id.as_bytes())? .map(|bytes| { utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) + .map_err(|_| { + Error::bad_database("Invalid shortroomid in db.") + }) + .map(ShortRoomId::new) }) .transpose() } - fn get_or_create_shortroomid(&self, room_id: &RoomId) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? { - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = services().globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) + fn get_or_create_shortroomid( + &self, + room_id: &RoomId, + ) -> Result { + let short = if let Some(short) = + self.roomid_shortroomid.get(room_id.as_bytes())? 
+ { + utils::u64_from_bytes(&short).map_err(|_| { + Error::bad_database("Invalid shortroomid in db.") + })? + } else { + let short = services().globals.next_count()?; + self.roomid_shortroomid + .insert(room_id.as_bytes(), &short.to_be_bytes())?; + short + }; + + Ok(ShortRoomId::new(short)) } } diff --git a/src/database/key_value/rooms/state.rs b/src/database/key_value/rooms/state.rs index f17d37bb..b7cbcb39 100644 --- a/src/database/key_value/rooms/state.rs +++ b/src/database/key_value/rooms/state.rs @@ -1,69 +1,101 @@ -use ruma::{EventId, OwnedEventId, RoomId}; -use std::collections::HashSet; +use std::{collections::HashSet, sync::Arc}; -use std::sync::Arc; -use tokio::sync::MutexGuard; +use ruma::{EventId, OwnedEventId, OwnedRoomId, RoomId}; -use crate::{database::KeyValueDatabase, service, utils, Error, Result}; +use crate::{ + database::KeyValueDatabase, + service::{ + self, + globals::marker, + rooms::short::{ShortEventId, ShortStateHash}, + }, + utils::{self, on_demand_hashmap::KeyToken}, + Error, Result, +}; impl service::rooms::state::Data for KeyValueDatabase { - fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? 
- .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) + fn get_room_shortstatehash( + &self, + room_id: &RoomId, + ) -> Result> { + self.roomid_shortstatehash.get(room_id.as_bytes())?.map_or( + Ok(None), + |bytes| { + Ok(Some(ShortStateHash::new( + utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database( + "Invalid shortstatehash in roomid_shortstatehash", + ) + })?, + ))) + }, + ) } fn set_room_state( &self, - room_id: &RoomId, - new_shortstatehash: u64, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + room_id: &KeyToken, + new_shortstatehash: ShortStateHash, ) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; + self.roomid_shortstatehash.insert( + room_id.as_bytes(), + &new_shortstatehash.get().to_be_bytes(), + )?; Ok(()) } - fn set_event_state(&self, shorteventid: u64, shortstatehash: u64) -> Result<()> { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; + fn set_event_state( + &self, + shorteventid: ShortEventId, + shortstatehash: ShortStateHash, + ) -> Result<()> { + self.shorteventid_shortstatehash.insert( + &shorteventid.get().to_be_bytes(), + &shortstatehash.get().to_be_bytes(), + )?; Ok(()) } - fn get_forward_extremities(&self, room_id: &RoomId) -> Result>> { + fn get_forward_extremities( + &self, + room_id: &RoomId, + ) -> Result>> { let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); self.roomid_pduleaves .scan_prefix(prefix) .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) + EventId::parse_arc(utils::string_from_bytes(&bytes).map_err( + |_| { + Error::bad_database( + "EventID in roomid_pduleaves is invalid unicode.", + ) + }, + )?) + .map_err(|_| { + Error::bad_database( + "EventId in roomid_pduleaves is invalid.", + ) + }) }) .collect() } - fn set_forward_extremities<'a>( + fn set_forward_extremities( &self, - room_id: &RoomId, + room_id: &KeyToken, event_ids: Vec, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { self.roomid_pduleaves.remove(&key)?; } for event_id in event_ids { - let mut key = prefix.to_owned(); + let mut key = prefix.clone(); key.extend_from_slice(event_id.as_bytes()); self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; } diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs index fe40b937..1e9e1b64 100644 --- a/src/database/key_value/rooms/state_accessor.rs +++ b/src/database/key_value/rooms/state_accessor.rs @@ -1,186 +1,34 @@ -use std::{collections::HashMap, sync::Arc}; +use ruma::EventId; -use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result}; -use async_trait::async_trait; -use ruma::{events::StateEventType, EventId, RoomId}; +use crate::{ + database::KeyValueDatabase, + service::{self, rooms::short::ShortStateHash}, + utils, Error, Result, +}; -#[async_trait] impl service::rooms::state_accessor::Data for KeyValueDatabase { - async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = services() - .rooms - .state_compressor - .load_shortstatehash_info(shortstatehash)? 
- .pop() - .expect("there is always one layer") - .1; - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state.iter() { - let parsed = services() - .rooms - .state_compressor - .parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = services() - .rooms - .state_compressor - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state.iter() { - let (_, eventid) = services() - .rooms - .state_compressor - .parse_compressed_state_event(compressed)?; - if let Some(pdu) = services().rooms.timeline.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match services() - .rooms - .short - .get_shortstatekey(event_type, state_key)? - { - Some(s) => s, - None => return Ok(None), - }; - let full_state = services() - .rooms - .state_compressor - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - services() - .rooms - .state_compressor - .parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). 
- fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| { - services().rooms.timeline.get_pdu(&event_id) - }) - } - /// Returns the state hash for this pdu. - fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { + fn pdu_shortstatehash( + &self, + event_id: &EventId, + ) -> Result> { + self.eventid_shorteventid.get(event_id.as_bytes())?.map_or( + Ok(None), + |shorteventid| { self.shorteventid_shortstatehash .get(&shorteventid)? .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) + utils::u64_from_bytes(&bytes) + .map_err(|_| { + Error::bad_database( + "Invalid shortstatehash bytes in \ + shorteventid_shortstatehash", + ) + }) + .map(ShortStateHash::new) }) .transpose() - }) - } - - /// Returns the full room state. - async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = - services().rooms.state.get_room_shortstatehash(room_id)? - { - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = - services().rooms.state.get_room_shortstatehash(room_id)? - { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). 
- fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = - services().rooms.state.get_room_shortstatehash(room_id)? - { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } + }, + ) } } diff --git a/src/database/key_value/rooms/state_cache.rs b/src/database/key_value/rooms/state_cache.rs index 49e3842b..c90a18b1 100644 --- a/src/database/key_value/rooms/state_cache.rs +++ b/src/database/key_value/rooms/state_cache.rs @@ -1,4 +1,4 @@ -use std::{collections::HashSet, sync::Arc}; +use std::collections::HashSet; use ruma::{ events::{AnyStrippedStateEvent, AnySyncStateEvent}, @@ -13,20 +13,24 @@ use crate::{ }; impl service::rooms::state_cache::Data for KeyValueDatabase { - fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + fn mark_as_once_joined( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result<()> { let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); + userroom_id.push(0xFF); userroom_id.extend_from_slice(room_id.as_bytes()); self.roomuseroncejoinedids.insert(&userroom_id, &[]) } fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); + roomuser_id.push(0xFF); roomuser_id.extend_from_slice(user_id.as_bytes()); let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); + userroom_id.push(0xFF); userroom_id.extend_from_slice(room_id.as_bytes()); self.userroomid_joined.insert(&userroom_id, &[])?; @@ -46,11 +50,11 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { last_state: Option>>, ) -> Result<()> { let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); + roomuser_id.push(0xFF); roomuser_id.extend_from_slice(user_id.as_bytes()); let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); + 
userroom_id.push(0xFF); userroom_id.extend_from_slice(room_id.as_bytes()); self.userroomid_invitestate.insert( @@ -72,17 +76,18 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); + roomuser_id.push(0xFF); roomuser_id.extend_from_slice(user_id.as_bytes()); let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); + userroom_id.push(0xFF); userroom_id.extend_from_slice(room_id.as_bytes()); + // TODO self.userroomid_leftstate.insert( &userroom_id, &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO + )?; self.roomuserid_leftcount.insert( &roomuser_id, &services().globals.next_count()?.to_be_bytes(), @@ -95,13 +100,16 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { Ok(()) } - fn update_joined_count(&self, room_id: &RoomId) -> Result<()> { + fn update_joined_count( + &self, + room_id: &RoomId, + ) -> Result> { let mut joinedcount = 0_u64; let mut invitedcount = 0_u64; let mut joined_servers = HashSet::new(); let mut real_users = HashSet::new(); - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { + for joined in self.room_members(room_id).filter_map(Result::ok) { joined_servers.insert(joined.server_name().to_owned()); if joined.server_name() == services().globals.server_name() && !services().users.is_deactivated(&joined).unwrap_or(true) @@ -111,7 +119,9 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { joinedcount += 1; } - for _invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { + for _invited in + self.room_members_invited(room_id).filter_map(Result::ok) + { invitedcount += 1; } @@ -121,20 +131,17 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { self.roomid_invitedcount .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - self.our_real_users_cache - .write() - .unwrap() - 
.insert(room_id.to_owned(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { + for old_joined_server in + self.room_servers(room_id).filter_map(Result::ok) + { if !joined_servers.remove(&old_joined_server) { // Server not in room anymore let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); + roomserver_id.push(0xFF); roomserver_id.extend_from_slice(old_joined_server.as_bytes()); let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); + serverroom_id.push(0xFF); serverroom_id.extend_from_slice(room_id.as_bytes()); self.roomserverids.remove(&roomserver_id)?; @@ -145,92 +152,55 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { // Now only new servers are in joined_servers anymore for server in joined_servers { let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); + roomserver_id.push(0xFF); roomserver_id.extend_from_slice(server.as_bytes()); let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); + serverroom_id.push(0xFF); serverroom_id.extend_from_slice(room_id.as_bytes()); self.roomserverids.insert(&roomserver_id, &[])?; self.serverroomids.insert(&serverroom_id, &[])?; } - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) + Ok(real_users) } - #[tracing::instrument(skip(self, room_id))] - fn get_our_real_users(&self, room_id: &RoomId) -> Result>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } + #[tracing::instrument( + skip(self, appservice), + fields(appservice_id = appservice.registration.id), + )] + fn appservice_in_room( + &self, + room_id: &RoomId, + appservice: &RegistrationInfo, + ) -> Result { + let 
bridge_user_id = UserId::parse_with_server_name( + appservice.registration.sender_localpart.as_str(), + services().globals.server_name(), + ) + .ok(); - #[tracing::instrument(skip(self, room_id, appservice))] - fn appservice_in_room(&self, room_id: &RoomId, appservice: &RegistrationInfo) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.registration.id)) - .copied(); + let in_room = bridge_user_id + .is_some_and(|id| self.is_joined(&id, room_id).unwrap_or(false)) + || self.room_members(room_id).any(|userid| { + userid.is_ok_and(|userid| { + appservice.users.is_match(userid.as_str()) + }) + }); - if let Some(b) = maybe { - Ok(b) - } else { - let bridge_user_id = UserId::parse_with_server_name( - appservice.registration.sender_localpart.as_str(), - services().globals.server_name(), - ) - .ok(); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| appservice.users.is_match(userid.as_str())) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.registration.id.clone(), in_room); - - Ok(in_room) - } + Ok(in_room) } /// Makes a user forget a room. 
#[tracing::instrument(skip(self))] fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); + userroom_id.push(0xFF); userroom_id.extend_from_slice(room_id.as_bytes()); let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); + roomuser_id.push(0xFF); roomuser_id.extend_from_slice(user_id.as_bytes()); self.userroomid_leftstate.remove(&userroom_id)?; @@ -246,51 +216,66 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { room_id: &RoomId, ) -> Box> + 'a> { let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); Box::new(self.roomserverids.scan_prefix(prefix).map(|(key, _)| { ServerName::parse( utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) + key.rsplit(|&b| b == 0xFF) .next() .expect("rsplit always returns an element"), ) .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") + Error::bad_database( + "Server name in roomserverids is invalid unicode.", + ) })?, ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) + .map_err(|_| { + Error::bad_database("Server name in roomserverids is invalid.") + }) })) } #[tracing::instrument(skip(self))] - fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { + fn server_in_room( + &self, + server: &ServerName, + room_id: &RoomId, + ) -> Result { let mut key = server.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(room_id.as_bytes()); self.serverroomids.get(&key).map(|o| o.is_some()) } - /// Returns an iterator of all rooms a server participates in (as far as we know). + /// Returns an iterator of all rooms a server participates in (as far as we + /// know). 
#[tracing::instrument(skip(self))] fn server_rooms<'a>( &'a self, server: &ServerName, ) -> Box> + 'a> { let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); Box::new(self.serverroomids.scan_prefix(prefix).map(|(key, _)| { RoomId::parse( utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) + key.rsplit(|&b| b == 0xFF) .next() .expect("rsplit always returns an element"), ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, + .map_err(|_| { + Error::bad_database( + "RoomId in serverroomids is invalid unicode.", + ) + })?, ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) + .map_err(|_| { + Error::bad_database("RoomId in serverroomids is invalid.") + }) })) } @@ -301,20 +286,24 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { room_id: &RoomId, ) -> Box> + 'a> { let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); Box::new(self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { UserId::parse( utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) + key.rsplit(|&b| b == 0xFF) .next() .expect("rsplit always returns an element"), ) .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") + Error::bad_database( + "User ID in roomuserid_joined is invalid unicode.", + ) })?, ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) + .map_err(|_| { + Error::bad_database("User ID in roomuserid_joined is invalid.") + }) })) } @@ -323,8 +312,9 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { self.roomid_joinedcount .get(room_id.as_bytes())? 
.map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) + utils::u64_from_bytes(&b).map_err(|_| { + Error::bad_database("Invalid joinedcount in db.") + }) }) .transpose() } @@ -334,42 +324,13 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { self.roomid_invitedcount .get(room_id.as_bytes())? .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) + utils::u64_from_bytes(&b).map_err(|_| { + Error::bad_database("Invalid joinedcount in db.") + }) }) .transpose() } - /// Returns an iterator over all User IDs who ever joined a room. - #[tracing::instrument(skip(self))] - fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> Box> + 'a> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - Box::new( - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database( - "User ID in room_useroncejoined is invalid unicode.", - ) - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }), - ) - } - /// Returns an iterator over all invited members of a room. 
#[tracing::instrument(skip(self))] fn room_members_invited<'a>( @@ -377,53 +338,64 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { room_id: &RoomId, ) -> Box> + 'a> { let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); - Box::new( - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, + Box::new(self.roomuserid_invitecount.scan_prefix(prefix).map( + |(key, _)| { + UserId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xFF) + .next() + .expect("rsplit always returns an element"), ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }), - ) + .map_err(|_| { + Error::bad_database( + "User ID in roomuserid_invited is invalid unicode.", + ) + })?, + ) + .map_err(|_| { + Error::bad_database( + "User ID in roomuserid_invited is invalid.", + ) + }) + }, + )) } #[tracing::instrument(skip(self))] - fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + fn get_invite_count( + &self, + room_id: &RoomId, + user_id: &UserId, + ) -> Result> { let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(user_id.as_bytes()); - self.roomuserid_invitecount - .get(&key)? 
- .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) + self.roomuserid_invitecount.get(&key)?.map_or(Ok(None), |bytes| { + Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid invitecount in db.") + })?)) + }) } #[tracing::instrument(skip(self))] - fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + fn get_left_count( + &self, + room_id: &RoomId, + user_id: &UserId, + ) -> Result> { let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(user_id.as_bytes()); self.roomuserid_leftcount .get(&key)? .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) + utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid leftcount in db.") + }) }) .transpose() } @@ -440,15 +412,22 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { .map(|(key, _)| { RoomId::parse( utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) + key.rsplit(|&b| b == 0xFF) .next() .expect("rsplit always returns an element"), ) .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") + Error::bad_database( + "Room ID in userroomid_joined is invalid \ + unicode.", + ) })?, ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) + .map_err(|_| { + Error::bad_database( + "Room ID in userroomid_joined is invalid.", + ) + }) }), ) } @@ -459,35 +438,43 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { fn rooms_invited<'a>( &'a self, user_id: &UserId, - ) -> Box>)>> + 'a> { + ) -> Box< + dyn Iterator< + Item = Result<(OwnedRoomId, Vec>)>, + > + 'a, + > { let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); - Box::new( - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - 
utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, + Box::new(self.userroomid_invitestate.scan_prefix(prefix).map( + |(key, state)| { + let room_id = RoomId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xFF) + .next() + .expect("rsplit always returns an element"), ) .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid.") - })?; + Error::bad_database( + "Room ID in userroomid_invited is invalid unicode.", + ) + })?, + ) + .map_err(|_| { + Error::bad_database( + "Room ID in userroomid_invited is invalid.", + ) + })?; - let state = serde_json::from_slice(&state).map_err(|_| { - Error::bad_database("Invalid state in userroomid_invitestate.") - })?; + let state = serde_json::from_slice(&state).map_err(|_| { + Error::bad_database( + "Invalid state in userroomid_invitestate.", + ) + })?; - Ok((room_id, state)) - }), - ) + Ok((room_id, state)) + }, + )) } #[tracing::instrument(skip(self))] @@ -497,14 +484,17 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { room_id: &RoomId, ) -> Result>>> { let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(room_id.as_bytes()); self.userroomid_invitestate .get(&key)? .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; + let state = serde_json::from_slice(&state).map_err(|_| { + Error::bad_database( + "Invalid state in userroomid_invitestate.", + ) + })?; Ok(state) }) @@ -518,14 +508,17 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { room_id: &RoomId, ) -> Result>>> { let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(room_id.as_bytes()); self.userroomid_leftstate .get(&key)? 
.map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; + let state = serde_json::from_slice(&state).map_err(|_| { + Error::bad_database( + "Invalid state in userroomid_leftstate.", + ) + })?; Ok(state) }) @@ -538,41 +531,48 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { fn rooms_left<'a>( &'a self, user_id: &UserId, - ) -> Box>)>> + 'a> { + ) -> Box< + dyn Iterator>)>> + + 'a, + > { let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); - Box::new( - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, + Box::new(self.userroomid_leftstate.scan_prefix(prefix).map( + |(key, state)| { + let room_id = RoomId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xFF) + .next() + .expect("rsplit always returns an element"), ) .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid.") - })?; + Error::bad_database( + "Room ID in userroomid_invited is invalid unicode.", + ) + })?, + ) + .map_err(|_| { + Error::bad_database( + "Room ID in userroomid_invited is invalid.", + ) + })?; - let state = serde_json::from_slice(&state).map_err(|_| { - Error::bad_database("Invalid state in userroomid_leftstate.") - })?; + let state = serde_json::from_slice(&state).map_err(|_| { + Error::bad_database( + "Invalid state in userroomid_leftstate.", + ) + })?; - Ok((room_id, state)) - }), - ) + Ok((room_id, state)) + }, + )) } #[tracing::instrument(skip(self))] fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); + userroom_id.push(0xFF); 
userroom_id.extend_from_slice(room_id.as_bytes()); Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) @@ -581,7 +581,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { #[tracing::instrument(skip(self))] fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); + userroom_id.push(0xFF); userroom_id.extend_from_slice(room_id.as_bytes()); Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) @@ -590,7 +590,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { #[tracing::instrument(skip(self))] fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); + userroom_id.push(0xFF); userroom_id.extend_from_slice(room_id.as_bytes()); Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) @@ -599,7 +599,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { #[tracing::instrument(skip(self))] fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result { let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); + userroom_id.push(0xFF); userroom_id.extend_from_slice(room_id.as_bytes()); Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) diff --git a/src/database/key_value/rooms/state_compressor.rs b/src/database/key_value/rooms/state_compressor.rs index 65ea603e..4f487ce2 100644 --- a/src/database/key_value/rooms/state_compressor.rs +++ b/src/database/key_value/rooms/state_compressor.rs @@ -2,19 +2,28 @@ use std::{collections::HashSet, mem::size_of, sync::Arc}; use crate::{ database::KeyValueDatabase, - service::{self, rooms::state_compressor::data::StateDiff}, + service::{ + self, + rooms::{ + short::ShortStateHash, + state_compressor::{data::StateDiff, CompressedStateEvent}, + }, + }, utils, Error, Result, }; impl service::rooms::state_compressor::Data for KeyValueDatabase { - fn get_statediff(&self, shortstatehash: 
u64) -> Result { + fn get_statediff( + &self, + shortstatehash: ShortStateHash, + ) -> Result { let value = self .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? + .get(&shortstatehash.get().to_be_bytes())? .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - let parent = if parent != 0 { Some(parent) } else { None }; + let parent = utils::u64_from_bytes(&value[0..size_of::()]) + .expect("bytes have right length"); + let parent = (parent != 0).then_some(ShortStateHash::new(parent)); let mut add_mode = true; let mut added = HashSet::new(); @@ -28,9 +37,13 @@ impl service::rooms::state_compressor::Data for KeyValueDatabase { continue; } if add_mode { - added.insert(v.try_into().expect("we checked the size above")); + added.insert(CompressedStateEvent::from_bytes( + v.try_into().expect("we checked the size above"), + )); } else { - removed.insert(v.try_into().expect("we checked the size above")); + removed.insert(CompressedStateEvent::from_bytes( + v.try_into().expect("we checked the size above"), + )); } i += 2 * size_of::(); } @@ -42,20 +55,25 @@ impl service::rooms::state_compressor::Data for KeyValueDatabase { }) } - fn save_statediff(&self, shortstatehash: u64, diff: StateDiff) -> Result<()> { - let mut value = diff.parent.unwrap_or(0).to_be_bytes().to_vec(); + fn save_statediff( + &self, + shortstatehash: ShortStateHash, + diff: StateDiff, + ) -> Result<()> { + let mut value = + diff.parent.map_or(0, |h| h.get()).to_be_bytes().to_vec(); for new in diff.added.iter() { - value.extend_from_slice(&new[..]); + value.extend_from_slice(&new.as_bytes()); } if !diff.removed.is_empty() { value.extend_from_slice(&0_u64.to_be_bytes()); for removed in diff.removed.iter() { - value.extend_from_slice(&removed[..]); + value.extend_from_slice(&removed.as_bytes()); } } self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value) + 
.insert(&shortstatehash.get().to_be_bytes(), &value) } } diff --git a/src/database/key_value/rooms/threads.rs b/src/database/key_value/rooms/threads.rs index 5e3dc970..8f331d4f 100644 --- a/src/database/key_value/rooms/threads.rs +++ b/src/database/key_value/rooms/threads.rs @@ -1,8 +1,13 @@ -use std::mem; +use ruma::{ + api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, + UserId, +}; -use ruma::{api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, UserId}; - -use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result}; +use crate::{ + database::KeyValueDatabase, + service::{self, rooms::timeline::PduId}, + services, utils, Error, PduEvent, Result, +}; impl service::rooms::threads::Data for KeyValueDatabase { fn threads_until<'a>( @@ -17,6 +22,7 @@ impl service::rooms::threads::Data for KeyValueDatabase { .short .get_shortroomid(room_id)? .expect("room exists") + .get() .to_be_bytes() .to_vec(); @@ -28,14 +34,24 @@ impl service::rooms::threads::Data for KeyValueDatabase { .iter_from(¤t, true) .take_while(move |(k, _)| k.starts_with(&prefix)) .map(move |(pduid, _users)| { - let count = utils::u64_from_bytes(&pduid[(mem::size_of::())..]) - .map_err(|_| Error::bad_database("Invalid pduid in threadid_userids."))?; + let count = + utils::u64_from_bytes(&pduid[(size_of::())..]) + .map_err(|_| { + Error::bad_database( + "Invalid pduid in threadid_userids.", + ) + })?; + + let pduid = PduId::new(pduid); + let mut pdu = services() .rooms .timeline .get_pdu_from_id(&pduid)? 
.ok_or_else(|| { - Error::bad_database("Invalid pduid reference in threadid_userids") + Error::bad_database( + "Invalid pduid reference in threadid_userids", + ) })?; if pdu.sender != user_id { pdu.remove_transaction_id()?; @@ -45,30 +61,45 @@ impl service::rooms::threads::Data for KeyValueDatabase { )) } - fn update_participants(&self, root_id: &[u8], participants: &[OwnedUserId]) -> Result<()> { + fn update_participants( + &self, + root_id: &PduId, + participants: &[OwnedUserId], + ) -> Result<()> { let users = participants .iter() .map(|user| user.as_bytes()) .collect::>() - .join(&[0xff][..]); + .join(&[0xFF][..]); - self.threadid_userids.insert(root_id, &users)?; + self.threadid_userids.insert(root_id.as_bytes(), &users)?; Ok(()) } - fn get_participants(&self, root_id: &[u8]) -> Result>> { - if let Some(users) = self.threadid_userids.get(root_id)? { + fn get_participants( + &self, + root_id: &PduId, + ) -> Result>> { + if let Some(users) = self.threadid_userids.get(root_id.as_bytes())? { Ok(Some( users - .split(|b| *b == 0xff) + .split(|b| *b == 0xFF) .map(|bytes| { - UserId::parse(utils::string_from_bytes(bytes).map_err(|_| { - Error::bad_database("Invalid UserId bytes in threadid_userids.") - })?) - .map_err(|_| Error::bad_database("Invalid UserId in threadid_userids.")) + UserId::parse(utils::string_from_bytes(bytes).map_err( + |_| { + Error::bad_database( + "Invalid UserId bytes in threadid_userids.", + ) + }, + )?) 
+ .map_err(|_| { + Error::bad_database( + "Invalid UserId in threadid_userids.", + ) + }) }) - .filter_map(|r| r.ok()) + .filter_map(Result::ok) .collect(), )) } else { diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index 0331a624..b46e9bfe 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -1,42 +1,18 @@ -use std::{collections::hash_map, mem::size_of, sync::Arc}; +use std::{mem::size_of, sync::Arc}; use ruma::{ - api::client::error::ErrorKind, CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId, + api::client::error::ErrorKind, CanonicalJsonObject, EventId, OwnedUserId, + RoomId, UserId, }; -use tracing::error; - -use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result}; - use service::rooms::timeline::PduCount; -impl service::rooms::timeline::Data for KeyValueDatabase { - fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - .pdus_until(sender_user, room_id, PduCount::max())? - .find_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - { - Ok(*v.insert(last_count.0)) - } else { - Ok(PduCount::Normal(0)) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } +use crate::{ + database::KeyValueDatabase, + service::{self, rooms::timeline::PduId}, + services, utils, Error, PduEvent, Result, +}; +impl service::rooms::timeline::Data for KeyValueDatabase { /// Returns the `count` of this pdu's id. fn get_pdu_count(&self, event_id: &EventId) -> Result> { self.eventid_pduid @@ -46,14 +22,18 @@ impl service::rooms::timeline::Data for KeyValueDatabase { } /// Returns the json of a pdu. 
- fn get_pdu_json(&self, event_id: &EventId) -> Result> { + fn get_pdu_json( + &self, + event_id: &EventId, + ) -> Result> { self.get_non_outlier_pdu_json(event_id)?.map_or_else( || { self.eventid_outlierpdu .get(event_id.as_bytes())? .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) + serde_json::from_slice(&pdu).map_err(|_| { + Error::bad_database("Invalid PDU in db.") + }) }) .transpose() }, @@ -62,38 +42,46 @@ impl service::rooms::timeline::Data for KeyValueDatabase { } /// Returns the json of a pdu. - fn get_non_outlier_pdu_json(&self, event_id: &EventId) -> Result> { + fn get_non_outlier_pdu_json( + &self, + event_id: &EventId, + ) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) + self.pduid_pdu.get(&pduid)?.ok_or_else(|| { + Error::bad_database("Invalid pduid in eventid_pduid.") + }) }) .transpose()? .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) + serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db.")) }) .transpose() } /// Returns the pdu's id. - fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) + fn get_pdu_id(&self, event_id: &EventId) -> Result> { + self.eventid_pduid.get(event_id.as_bytes()).map(|x| x.map(PduId::new)) } /// Returns the pdu. - fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { + fn get_non_outlier_pdu( + &self, + event_id: &EventId, + ) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) + self.pduid_pdu.get(&pduid)?.ok_or_else(|| { + Error::bad_database("Invalid pduid in eventid_pduid.") + }) }) .transpose()? 
.map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) + serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db.")) }) .transpose() } @@ -101,42 +89,31 @@ impl service::rooms::timeline::Data for KeyValueDatabase { /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. + #[tracing::instrument(skip(self))] fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self + Ok(self .get_non_outlier_pdu(event_id)? .map_or_else( || { self.eventid_outlierpdu .get(event_id.as_bytes())? .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) + serde_json::from_slice(&pdu).map_err(|_| { + Error::bad_database("Invalid PDU in db.") + }) }) .transpose() }, |x| Ok(Some(x)), )? - .map(Arc::new) - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } + .map(Arc::new)) } /// Returns the pdu. /// /// This does __NOT__ check the outliers `Tree`. - fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { + fn get_pdu_from_id(&self, pdu_id: &PduId) -> Result> { + self.pduid_pdu.get(pdu_id.as_bytes())?.map_or(Ok(None), |pdu| { Ok(Some( serde_json::from_slice(&pdu) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, @@ -145,8 +122,11 @@ impl service::rooms::timeline::Data for KeyValueDatabase { } /// Returns the pdu as a `BTreeMap`. 
- fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { + fn get_pdu_json_from_id( + &self, + pdu_id: &PduId, + ) -> Result> { + self.pduid_pdu.get(pdu_id.as_bytes())?.map_or(Ok(None), |pdu| { Ok(Some( serde_json::from_slice(&pdu) .map_err(|_| Error::bad_database("Invalid PDU in db."))?, @@ -156,22 +136,18 @@ impl service::rooms::timeline::Data for KeyValueDatabase { fn append_pdu( &self, - pdu_id: &[u8], + pdu_id: &PduId, pdu: &PduEvent, json: &CanonicalJsonObject, - count: u64, ) -> Result<()> { self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(json).expect("CanonicalJsonObject is always a valid"), + pdu_id.as_bytes(), + &serde_json::to_vec(json) + .expect("CanonicalJsonObject is always a valid"), )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), PduCount::Normal(count)); - - self.eventid_pduid.insert(pdu.event_id.as_bytes(), pdu_id)?; + self.eventid_pduid + .insert(pdu.event_id.as_bytes(), pdu_id.as_bytes())?; self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; Ok(()) @@ -179,16 +155,17 @@ impl service::rooms::timeline::Data for KeyValueDatabase { fn prepend_backfill_pdu( &self, - pdu_id: &[u8], + pdu_id: &PduId, event_id: &EventId, json: &CanonicalJsonObject, ) -> Result<()> { self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(json).expect("CanonicalJsonObject is always a valid"), + pdu_id.as_bytes(), + &serde_json::to_vec(json) + .expect("CanonicalJsonObject is always a valid"), )?; - self.eventid_pduid.insert(event_id.as_bytes(), pdu_id)?; + self.eventid_pduid.insert(event_id.as_bytes(), pdu_id.as_bytes())?; self.eventid_outlierpdu.remove(event_id.as_bytes())?; Ok(()) @@ -197,14 +174,14 @@ impl service::rooms::timeline::Data for KeyValueDatabase { /// Removes a pdu and creates a new one with the same id. 
fn replace_pdu( &self, - pdu_id: &[u8], + pdu_id: &PduId, pdu_json: &CanonicalJsonObject, - pdu: &PduEvent, ) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { + if self.pduid_pdu.get(pdu_id.as_bytes())?.is_some() { self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu_json).expect("CanonicalJsonObject is always a valid"), + pdu_id.as_bytes(), + &serde_json::to_vec(pdu_json) + .expect("CanonicalJsonObject is always a valid"), )?; } else { return Err(Error::BadRequest( @@ -213,22 +190,19 @@ impl service::rooms::timeline::Data for KeyValueDatabase { )); } - self.pdu_cache - .lock() - .unwrap() - .remove(&(*pdu.event_id).to_owned()); - Ok(()) } - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. + /// Returns an iterator over all events and their tokens in a room that + /// happened before the event with id `until` in reverse-chronological + /// order. fn pdus_until<'a>( &'a self, user_id: &UserId, room_id: &RoomId, until: PduCount, - ) -> Result> + 'a>> { + ) -> Result> + 'a>> + { let (prefix, current) = count_to_id(room_id, until, 1, true)?; let user_id = user_id.to_owned(); @@ -239,7 +213,9 @@ impl service::rooms::timeline::Data for KeyValueDatabase { .take_while(move |(k, _)| k.starts_with(&prefix)) .map(move |(pdu_id, v)| { let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; + .map_err(|_| { + Error::bad_database("PDU in db is invalid.") + })?; if pdu.sender != user_id { pdu.remove_transaction_id()?; } @@ -255,7 +231,8 @@ impl service::rooms::timeline::Data for KeyValueDatabase { user_id: &UserId, room_id: &RoomId, from: PduCount, - ) -> Result> + 'a>> { + ) -> Result> + 'a>> + { let (prefix, current) = count_to_id(room_id, from, 1, false)?; let user_id = user_id.to_owned(); @@ -266,7 +243,9 @@ impl service::rooms::timeline::Data for KeyValueDatabase { .take_while(move |(k, _)| 
k.starts_with(&prefix)) .map(move |(pdu_id, v)| { let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; + .map_err(|_| { + Error::bad_database("PDU in db is invalid.") + })?; if pdu.sender != user_id { pdu.remove_transaction_id()?; } @@ -287,13 +266,13 @@ impl service::rooms::timeline::Data for KeyValueDatabase { let mut highlights_batch = Vec::new(); for user in notifies { let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); + userroom_id.push(0xFF); userroom_id.extend_from_slice(room_id.as_bytes()); notifies_batch.push(userroom_id); } for user in highlights { let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); + userroom_id.push(0xFF); userroom_id.extend_from_slice(room_id.as_bytes()); highlights_batch.push(userroom_id); } @@ -308,10 +287,12 @@ impl service::rooms::timeline::Data for KeyValueDatabase { /// Returns the `count` of this pdu's id. fn pdu_count(pdu_id: &[u8]) -> Result { - let last_u64 = utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes."))?; + let last_u64 = + utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) + .map_err(|_| Error::bad_database("PDU has invalid count bytes."))?; let second_last_u64 = utils::u64_from_bytes( - &pdu_id[pdu_id.len() - 2 * size_of::()..pdu_id.len() - size_of::()], + &pdu_id[pdu_id.len() - 2 * size_of::() + ..pdu_id.len() - size_of::()], ); if matches!(second_last_u64, Ok(0)) { @@ -331,7 +312,10 @@ fn count_to_id( .rooms .short .get_shortroomid(room_id)? - .ok_or_else(|| Error::bad_database("Looked for bad shortroomid in timeline"))? + .ok_or_else(|| { + Error::bad_database("Looked for bad shortroomid in timeline") + })? 
+ .get() .to_be_bytes() .to_vec(); let mut pdu_id = prefix.clone(); diff --git a/src/database/key_value/rooms/user.rs b/src/database/key_value/rooms/user.rs index 4c435720..b474def3 100644 --- a/src/database/key_value/rooms/user.rs +++ b/src/database/key_value/rooms/user.rs @@ -1,14 +1,22 @@ use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; -use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; +use crate::{ + database::KeyValueDatabase, + service::{self, rooms::short::ShortStateHash}, + services, utils, Error, Result, +}; impl service::rooms::user::Data for KeyValueDatabase { - fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + fn reset_notification_counts( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result<()> { let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); + userroom_id.push(0xFF); userroom_id.extend_from_slice(room_id.as_bytes()); let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); + roomuser_id.push(0xFF); roomuser_id.extend_from_slice(user_id.as_bytes()); self.userroomid_notificationcount @@ -24,37 +32,51 @@ impl service::rooms::user::Data for KeyValueDatabase { Ok(()) } - fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { + fn notification_count( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result { let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); + userroom_id.push(0xFF); userroom_id.extend_from_slice(room_id.as_bytes()); - self.userroomid_notificationcount - .get(&userroom_id)? 
- .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) + self.userroomid_notificationcount.get(&userroom_id)?.map_or( + Ok(0), + |bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid notification count in db.") + }) + }, + ) } - fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { + fn highlight_count( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result { let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); + userroom_id.push(0xFF); userroom_id.extend_from_slice(room_id.as_bytes()); - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) + self.userroomid_highlightcount.get(&userroom_id)?.map_or( + Ok(0), + |bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid highlight count in db.") + }) + }, + ) } - fn last_notification_read(&self, user_id: &UserId, room_id: &RoomId) -> Result { + fn last_notification_read( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result { let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(user_id.as_bytes()); Ok(self @@ -62,7 +84,9 @@ impl service::rooms::user::Data for KeyValueDatabase { .get(&key)? .map(|bytes| { utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") + Error::bad_database( + "Count in roomuserid_lastprivatereadupdate is invalid.", + ) }) }) .transpose()? 
@@ -73,7 +97,7 @@ impl service::rooms::user::Data for KeyValueDatabase { &self, room_id: &RoomId, token: u64, - shortstatehash: u64, + shortstatehash: ShortStateHash, ) -> Result<()> { let shortroomid = services() .rooms @@ -81,29 +105,38 @@ impl service::rooms::user::Data for KeyValueDatabase { .get_shortroomid(room_id)? .expect("room exists"); - let mut key = shortroomid.to_be_bytes().to_vec(); + let mut key = shortroomid.get().to_be_bytes().to_vec(); key.extend_from_slice(&token.to_be_bytes()); self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) + .insert(&key, &shortstatehash.get().to_be_bytes()) } - fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { + fn get_token_shortstatehash( + &self, + room_id: &RoomId, + token: u64, + ) -> Result> { let shortroomid = services() .rooms .short .get_shortroomid(room_id)? .expect("room exists"); - let mut key = shortroomid.to_be_bytes().to_vec(); + let mut key = shortroomid.get().to_be_bytes().to_vec(); key.extend_from_slice(&token.to_be_bytes()); self.roomsynctoken_shortstatehash .get(&key)? .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) + utils::u64_from_bytes(&bytes) + .map_err(|_| { + Error::bad_database( + "Invalid shortstatehash in \ + roomsynctoken_shortstatehash", + ) + }) + .map(ShortStateHash::new) }) .transpose() } @@ -114,35 +147,49 @@ impl service::rooms::user::Data for KeyValueDatabase { ) -> Result> + 'a>> { let iterators = users.into_iter().map(move |user_id| { let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); self.userroomid_joined .scan_prefix(prefix) .map(|(key, _)| { + // Plus one because the room id starts AFTER the separator let roomid_index = key .iter() .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? 
+ .find(|(_, &b)| b == 0xFF) + .ok_or_else(|| { + Error::bad_database( + "Invalid userroomid_joined in db.", + ) + })? .0 - + 1; // +1 because the room id starts AFTER the separator + + 1; let room_id = key[roomid_index..].to_vec(); Ok::<_, Error>(room_id) }) - .filter_map(|r| r.ok()) + .filter_map(Result::ok) }); - // We use the default compare function because keys are sorted correctly (not reversed) + // We use the default compare function because keys are sorted correctly + // (not reversed) Ok(Box::new( utils::common_elements(iterators, Ord::cmp) .expect("users is not empty") .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) + RoomId::parse(utils::string_from_bytes(&bytes).map_err( + |_| { + Error::bad_database( + "Invalid RoomId bytes in userroomid_joined", + ) + }, + )?) + .map_err(|_| { + Error::bad_database( + "Invalid RoomId in userroomid_joined.", + ) + }) }), )) } diff --git a/src/database/key_value/sending.rs b/src/database/key_value/sending.rs index 3fc3e042..70f235b7 100644 --- a/src/database/key_value/sending.rs +++ b/src/database/key_value/sending.rs @@ -1,10 +1,11 @@ -use ruma::{ServerName, UserId}; +use ruma::{serde::Raw, ServerName, UserId}; use crate::{ database::KeyValueDatabase, service::{ self, - sending::{OutgoingKind, SendingEventType}, + rooms::timeline::PduId, + sending::{Destination, RequestKey, SendingEventType}, }, services, utils, Error, Result, }; @@ -12,32 +13,39 @@ use crate::{ impl service::sending::Data for KeyValueDatabase { fn active_requests<'a>( &'a self, - ) -> Box, OutgoingKind, SendingEventType)>> + 'a> { - Box::new( - self.servercurrentevent_data - .iter() - .map(|(key, v)| parse_servercurrentevent(&key, v).map(|(k, e)| (key, k, e))), - ) + ) -> Box< + dyn Iterator> + + 'a, + > { + Box::new(self.servercurrentevent_data.iter().map(|(key, v)| { + 
let key = RequestKey::new(key); + parse_servercurrentevent(&key, v).map(|(k, e)| (key, k, e)) + })) } fn active_requests_for<'a>( &'a self, - outgoing_kind: &OutgoingKind, - ) -> Box, SendingEventType)>> + 'a> { - let prefix = outgoing_kind.get_prefix(); - Box::new( - self.servercurrentevent_data - .scan_prefix(prefix) - .map(|(key, v)| parse_servercurrentevent(&key, v).map(|(_, e)| (key, e))), - ) + destination: &Destination, + ) -> Box> + 'a> + { + let prefix = destination.get_prefix(); + Box::new(self.servercurrentevent_data.scan_prefix(prefix).map( + |(key, v)| { + let key = RequestKey::new(key); + parse_servercurrentevent(&key, v).map(|(_, e)| (key, e)) + }, + )) } - fn delete_active_request(&self, key: Vec) -> Result<()> { - self.servercurrentevent_data.remove(&key) + fn delete_active_request(&self, key: RequestKey) -> Result<()> { + self.servercurrentevent_data.remove(key.as_bytes()) } - fn delete_all_active_requests_for(&self, outgoing_kind: &OutgoingKind) -> Result<()> { - let prefix = outgoing_kind.get_prefix(); + fn delete_all_active_requests_for( + &self, + destination: &Destination, + ) -> Result<()> { + let prefix = destination.get_prefix(); for (key, _) in self.servercurrentevent_data.scan_prefix(prefix) { self.servercurrentevent_data.remove(&key)?; } @@ -45,161 +53,163 @@ impl service::sending::Data for KeyValueDatabase { Ok(()) } - fn delete_all_requests_for(&self, outgoing_kind: &OutgoingKind) -> Result<()> { - let prefix = outgoing_kind.get_prefix(); - for (key, _) in self.servercurrentevent_data.scan_prefix(prefix.clone()) { - self.servercurrentevent_data.remove(&key).unwrap(); - } - - for (key, _) in self.servernameevent_data.scan_prefix(prefix) { - self.servernameevent_data.remove(&key).unwrap(); - } - - Ok(()) - } - fn queue_requests( &self, - requests: &[(&OutgoingKind, SendingEventType)], - ) -> Result>> { + requests: &[(&Destination, &SendingEventType)], + ) -> Result> { let mut batch = Vec::new(); let mut keys = Vec::new(); - for 
(outgoing_kind, event) in requests { - let mut key = outgoing_kind.get_prefix(); + for (destination, event) in requests { + let mut key = destination.get_prefix(); if let SendingEventType::Pdu(value) = &event { - key.extend_from_slice(value) + key.extend_from_slice(value.as_bytes()); } else { - key.extend_from_slice(&services().globals.next_count()?.to_be_bytes()) + key.extend_from_slice( + &services().globals.next_count()?.to_be_bytes(), + ); } let value = if let SendingEventType::Edu(value) = &event { - &**value + value.json().get().as_bytes() } else { &[] }; batch.push((key.clone(), value.to_owned())); - keys.push(key); + keys.push(RequestKey::new(key)); } - self.servernameevent_data - .insert_batch(&mut batch.into_iter())?; + self.servernameevent_data.insert_batch(&mut batch.into_iter())?; Ok(keys) } fn queued_requests<'a>( &'a self, - outgoing_kind: &OutgoingKind, - ) -> Box)>> + 'a> { - let prefix = outgoing_kind.get_prefix(); - return Box::new( - self.servernameevent_data - .scan_prefix(prefix) - .map(|(k, v)| parse_servercurrentevent(&k, v).map(|(_, ev)| (ev, k))), - ); + destination: &Destination, + ) -> Box> + 'a> + { + let prefix = destination.get_prefix(); + Box::new(self.servernameevent_data.scan_prefix(prefix).map(|(k, v)| { + let k = RequestKey::new(k); + parse_servercurrentevent(&k, v).map(|(_, ev)| (ev, k)) + })) } - fn mark_as_active(&self, events: &[(SendingEventType, Vec)]) -> Result<()> { + fn mark_as_active( + &self, + events: &[(SendingEventType, RequestKey)], + ) -> Result<()> { for (e, key) in events { let value = if let SendingEventType::Edu(value) = &e { - &**value + value.json().get().as_bytes() } else { &[] }; - self.servercurrentevent_data.insert(key, value)?; - self.servernameevent_data.remove(key)?; + self.servercurrentevent_data.insert(key.as_bytes(), value)?; + self.servernameevent_data.remove(key.as_bytes())?; } Ok(()) } - fn set_latest_educount(&self, server_name: &ServerName, last_count: u64) -> Result<()> { + fn 
set_latest_educount( + &self, + server_name: &ServerName, + last_count: u64, + ) -> Result<()> { self.servername_educount .insert(server_name.as_bytes(), &last_count.to_be_bytes()) } fn get_latest_educount(&self, server_name: &ServerName) -> Result { - self.servername_educount - .get(server_name.as_bytes())? - .map_or(Ok(0), |bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid u64 in servername_educount.")) - }) + self.servername_educount.get(server_name.as_bytes())?.map_or( + Ok(0), + |bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid u64 in servername_educount.") + }) + }, + ) } } -#[tracing::instrument(skip(key))] +#[tracing::instrument(skip(key, value))] fn parse_servercurrentevent( - key: &[u8], + key: &RequestKey, value: Vec, -) -> Result<(OutgoingKind, SendingEventType)> { - // Appservices start with a plus - Ok::<_, Error>(if key.starts_with(b"+") { - let mut parts = key[1..].splitn(2, |&b| b == 0xff); +) -> Result<(Destination, SendingEventType)> { + let key = key.as_bytes(); + let (destination, event) = if key.starts_with(b"+") { + let mut parts = key[1..].splitn(2, |&b| b == 0xFF); let server = parts.next().expect("splitn always returns one element"); - let event = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; - - let server = utils::string_from_bytes(server).map_err(|_| { - Error::bad_database("Invalid server bytes in server_currenttransaction") + let event = parts.next().ok_or_else(|| { + Error::bad_database("Invalid bytes in servercurrentpdus.") })?; - ( - OutgoingKind::Appservice(server), - if value.is_empty() { - SendingEventType::Pdu(event.to_vec()) - } else { - SendingEventType::Edu(value) - }, - ) + let server = utils::string_from_bytes(server).map_err(|_| { + Error::bad_database( + "Invalid server bytes in server_currenttransaction", + ) + })?; + + (Destination::Appservice(server), event) } else if key.starts_with(b"$") { - let 
mut parts = key[1..].splitn(3, |&b| b == 0xff); + let mut parts = key[1..].splitn(3, |&b| b == 0xFF); let user = parts.next().expect("splitn always returns one element"); - let user_string = utils::string_from_bytes(user) - .map_err(|_| Error::bad_database("Invalid user string in servercurrentevent"))?; - let user_id = UserId::parse(user_string) - .map_err(|_| Error::bad_database("Invalid user id in servercurrentevent"))?; - - let pushkey = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; - let pushkey_string = utils::string_from_bytes(pushkey) - .map_err(|_| Error::bad_database("Invalid pushkey in servercurrentevent"))?; - - let event = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; - - ( - OutgoingKind::Push(user_id, pushkey_string), - if value.is_empty() { - SendingEventType::Pdu(event.to_vec()) - } else { - // I'm pretty sure this should never be called - SendingEventType::Edu(value) - }, - ) - } else { - let mut parts = key.splitn(2, |&b| b == 0xff); - - let server = parts.next().expect("splitn always returns one element"); - let event = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; - - let server = utils::string_from_bytes(server).map_err(|_| { - Error::bad_database("Invalid server bytes in server_currenttransaction") + let user_string = utils::string_from_bytes(user).map_err(|_| { + Error::bad_database("Invalid user string in servercurrentevent") + })?; + let user_id = UserId::parse(user_string).map_err(|_| { + Error::bad_database("Invalid user id in servercurrentevent") })?; - ( - OutgoingKind::Normal(ServerName::parse(server).map_err(|_| { - Error::bad_database("Invalid server string in server_currenttransaction") - })?), - if value.is_empty() { - SendingEventType::Pdu(event.to_vec()) - } else { - SendingEventType::Edu(value) - }, - ) - }) + let pushkey = parts.next().ok_or_else(|| { + Error::bad_database("Invalid 
bytes in servercurrentpdus.") + })?; + let pushkey_string = + utils::string_from_bytes(pushkey).map_err(|_| { + Error::bad_database("Invalid pushkey in servercurrentevent") + })?; + + let event = parts.next().ok_or_else(|| { + Error::bad_database("Invalid bytes in servercurrentpdus.") + })?; + + (Destination::Push(user_id, pushkey_string), event) + } else { + let mut parts = key.splitn(2, |&b| b == 0xFF); + + let server = parts.next().expect("splitn always returns one element"); + let event = parts.next().ok_or_else(|| { + Error::bad_database("Invalid bytes in servercurrentpdus.") + })?; + + let server = utils::string_from_bytes(server) + .map_err(|_| { + Error::bad_database( + "Invalid server bytes in server_currenttransaction", + ) + })? + .try_into() + .map_err(|_| { + Error::bad_database( + "Invalid server string in server_currenttransaction", + ) + })?; + (Destination::Normal(server), event) + }; + + Ok(( + destination, + if value.is_empty() { + SendingEventType::Pdu(PduId::new(event.to_vec())) + } else { + SendingEventType::Edu( + Raw::from_json_string( + String::from_utf8(value) + .expect("EDU content in database should be a string"), + ) + .expect("EDU content in database should be valid JSON"), + ) + }, + )) } diff --git a/src/database/key_value/transaction_ids.rs b/src/database/key_value/transaction_ids.rs index 2ea6ad4a..345b74a1 100644 --- a/src/database/key_value/transaction_ids.rs +++ b/src/database/key_value/transaction_ids.rs @@ -11,9 +11,11 @@ impl service::transaction_ids::Data for KeyValueDatabase { data: &[u8], ) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.map(|d| d.as_bytes()).unwrap_or_default()); - key.push(0xff); + key.push(0xFF); + key.extend_from_slice( + device_id.map(DeviceId::as_bytes).unwrap_or_default(), + ); + key.push(0xFF); key.extend_from_slice(txn_id.as_bytes()); self.userdevicetxnid_response.insert(&key, data)?; @@ -28,9 +30,11 @@ impl 
service::transaction_ids::Data for KeyValueDatabase { txn_id: &TransactionId, ) -> Result>> { let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.map(|d| d.as_bytes()).unwrap_or_default()); - key.push(0xff); + key.push(0xFF); + key.extend_from_slice( + device_id.map(DeviceId::as_bytes).unwrap_or_default(), + ); + key.push(0xFF); key.extend_from_slice(txn_id.as_bytes()); // If there's no entry, this is a new transaction diff --git a/src/database/key_value/uiaa.rs b/src/database/key_value/uiaa.rs index 5fd91b07..e97f915e 100644 --- a/src/database/key_value/uiaa.rs +++ b/src/database/key_value/uiaa.rs @@ -1,42 +1,11 @@ use ruma::{ api::client::{error::ErrorKind, uiaa::UiaaInfo}, - CanonicalJsonValue, DeviceId, UserId, + DeviceId, UserId, }; use crate::{database::KeyValueDatabase, service, Error, Result}; impl service::uiaa::Data for KeyValueDatabase { - fn set_uiaa_request( - &self, - user_id: &UserId, - device_id: &DeviceId, - session: &str, - request: &CanonicalJsonValue, - ) -> Result<()> { - self.userdevicesessionid_uiaarequest - .write() - .unwrap() - .insert( - (user_id.to_owned(), device_id.to_owned(), session.to_owned()), - request.to_owned(), - ); - - Ok(()) - } - - fn get_uiaa_request( - &self, - user_id: &UserId, - device_id: &DeviceId, - session: &str, - ) -> Option { - self.userdevicesessionid_uiaarequest - .read() - .unwrap() - .get(&(user_id.to_owned(), device_id.to_owned(), session.to_owned())) - .map(|j| j.to_owned()) - } - fn update_uiaa_session( &self, user_id: &UserId, @@ -45,19 +14,19 @@ impl service::uiaa::Data for KeyValueDatabase { uiaainfo: Option<&UiaaInfo>, ) -> Result<()> { let mut userdevicesessionid = user_id.as_bytes().to_vec(); - userdevicesessionid.push(0xff); + userdevicesessionid.push(0xFF); userdevicesessionid.extend_from_slice(device_id.as_bytes()); - userdevicesessionid.push(0xff); + userdevicesessionid.push(0xFF); userdevicesessionid.extend_from_slice(session.as_bytes()); if let 
Some(uiaainfo) = uiaainfo { self.userdevicesessionid_uiaainfo.insert( &userdevicesessionid, - &serde_json::to_vec(&uiaainfo).expect("UiaaInfo::to_vec always works"), + &serde_json::to_vec(&uiaainfo) + .expect("UiaaInfo::to_vec always works"), )?; } else { - self.userdevicesessionid_uiaainfo - .remove(&userdevicesessionid)?; + self.userdevicesessionid_uiaainfo.remove(&userdevicesessionid)?; } Ok(()) @@ -70,9 +39,9 @@ impl service::uiaa::Data for KeyValueDatabase { session: &str, ) -> Result { let mut userdevicesessionid = user_id.as_bytes().to_vec(); - userdevicesessionid.push(0xff); + userdevicesessionid.push(0xFF); userdevicesessionid.extend_from_slice(device_id.as_bytes()); - userdevicesessionid.push(0xff); + userdevicesessionid.push(0xFF); userdevicesessionid.extend_from_slice(session.as_bytes()); serde_json::from_slice( @@ -80,10 +49,12 @@ impl service::uiaa::Data for KeyValueDatabase { .userdevicesessionid_uiaainfo .get(&userdevicesessionid)? .ok_or(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "UIAA session does not exist.", ))?, ) - .map_err(|_| Error::bad_database("UiaaInfo in userdeviceid_uiaainfo is invalid.")) + .map_err(|_| { + Error::bad_database("UiaaInfo in userdeviceid_uiaainfo is invalid.") + }) } } diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 0e6db83a..4c2295de 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -5,8 +5,9 @@ use ruma::{ encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::{AnyToDeviceEvent, StateEventType}, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, OwnedDeviceId, - OwnedDeviceKeyId, OwnedMxcUri, OwnedUserId, UInt, UserId, + DeviceId, MilliSecondsSinceUnixEpoch, OneTimeKeyAlgorithm, OneTimeKeyName, + OwnedDeviceId, OwnedKeyId, OwnedMxcUri, OwnedOneTimeKeyId, OwnedUserId, + UInt, UserId, }; use tracing::warn; @@ -34,75 +35,108 @@ impl service::users::Data for KeyValueDatabase { 
.is_empty()) } - /// Returns the number of users registered on this server. + /// Returns the number of local and remote users known by this server. fn count(&self) -> Result { Ok(self.userid_password.iter().count()) } /// Find out which user an access token belongs to. - fn find_from_token(&self, token: &str) -> Result> { - self.token_userdeviceid - .get(token.as_bytes())? - .map_or(Ok(None), |bytes| { - let mut parts = bytes.split(|&b| b == 0xff); + fn find_from_token( + &self, + token: &str, + ) -> Result> { + self.token_userdeviceid.get(token.as_bytes())?.map_or( + Ok(None), + |bytes| { + let mut parts = bytes.split(|&b| b == 0xFF); let user_bytes = parts.next().ok_or_else(|| { - Error::bad_database("User ID in token_userdeviceid is invalid.") + Error::bad_database( + "User ID in token_userdeviceid is invalid.", + ) })?; let device_bytes = parts.next().ok_or_else(|| { - Error::bad_database("Device ID in token_userdeviceid is invalid.") + Error::bad_database( + "Device ID in token_userdeviceid is invalid.", + ) })?; Ok(Some(( - UserId::parse(utils::string_from_bytes(user_bytes).map_err(|_| { - Error::bad_database("User ID in token_userdeviceid is invalid unicode.") - })?) + UserId::parse( + utils::string_from_bytes(user_bytes).map_err(|_| { + Error::bad_database( + "User ID in token_userdeviceid is invalid \ + unicode.", + ) + })?, + ) .map_err(|_| { - Error::bad_database("User ID in token_userdeviceid is invalid.") + Error::bad_database( + "User ID in token_userdeviceid is invalid.", + ) })?, utils::string_from_bytes(device_bytes).map_err(|_| { - Error::bad_database("Device ID in token_userdeviceid is invalid.") + Error::bad_database( + "Device ID in token_userdeviceid is invalid.", + ) })?, ))) - }) + }, + ) } - /// Returns an iterator over all users on this homeserver. - fn iter<'a>(&'a self) -> Box> + 'a> { + /// Returns an iterator over all local and remote users on this homeserver. 
+ fn iter<'a>( + &'a self, + ) -> Box> + 'a> { Box::new(self.userid_password.iter().map(|(bytes, _)| { UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("User ID in userid_password is invalid unicode.") + Error::bad_database( + "User ID in userid_password is invalid unicode.", + ) })?) - .map_err(|_| Error::bad_database("User ID in userid_password is invalid.")) + .map_err(|_| { + Error::bad_database("User ID in userid_password is invalid.") + }) })) } /// Returns a list of local users as list of usernames. /// - /// A user account is considered `local` if the length of it's password is greater then zero. + /// A user account is considered `local` if the length of it's password is + /// greater then zero. fn list_local_users(&self) -> Result> { let users: Vec = self .userid_password .iter() - .filter_map(|(username, pw)| get_username_with_valid_password(&username, &pw)) + .filter_map(|(username, pw)| { + get_username_with_valid_password(&username, &pw) + }) .collect(); Ok(users) } /// Returns the password hash for the given user. fn password_hash(&self, user_id: &UserId) -> Result> { - self.userid_password - .get(user_id.as_bytes())? 
- .map_or(Ok(None), |bytes| { + self.userid_password.get(user_id.as_bytes())?.map_or( + Ok(None), + |bytes| { Ok(Some(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Password hash in db is not valid string.") + Error::bad_database( + "Password hash in db is not valid string.", + ) })?)) - }) + }, + ) } /// Hash and set the user's password to the Argon2 hash - fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { + fn set_password( + &self, + user_id: &UserId, + password: Option<&str>, + ) -> Result<()> { if let Some(password) = password { - if let Ok(hash) = utils::calculate_password_hash(password) { + if let Ok(hash) = utils::hash_password(password) { self.userid_password .insert(user_id.as_bytes(), hash.as_bytes())?; Ok(()) @@ -118,19 +152,25 @@ impl service::users::Data for KeyValueDatabase { } } - /// Returns the displayname of a user on this homeserver. + /// Returns the `displayname` of a user on this homeserver. fn displayname(&self, user_id: &UserId) -> Result> { - self.userid_displayname - .get(user_id.as_bytes())? - .map_or(Ok(None), |bytes| { + self.userid_displayname.get(user_id.as_bytes())?.map_or( + Ok(None), + |bytes| { Ok(Some(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("Displayname in db is invalid.") })?)) - }) + }, + ) } - /// Sets a new displayname or removes it if displayname is None. You still need to nofify all rooms of this change. - fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()> { + /// Sets a new `displayname` or removes it if `displayname` is `None`. You + /// still need to nofify all rooms of this change. + fn set_displayname( + &self, + user_id: &UserId, + displayname: Option, + ) -> Result<()> { if let Some(displayname) = displayname { self.userid_displayname .insert(user_id.as_bytes(), displayname.as_bytes())?; @@ -141,23 +181,31 @@ impl service::users::Data for KeyValueDatabase { Ok(()) } - /// Get the avatar_url of a user. 
+ /// Get the `avatar_url` of a user. fn avatar_url(&self, user_id: &UserId) -> Result> { self.userid_avatarurl .get(user_id.as_bytes())? .map(|bytes| { utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Avatar URL in db is invalid.")) + .map_err(|_| { + Error::bad_database("Avatar URL in db is invalid.") + }) .map(Into::into) }) .transpose() } - /// Sets a new avatar_url or removes it if avatar_url is None. - fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) -> Result<()> { + /// Sets a new `avatar_url` or removes it if `avatar_url` is `None`. + fn set_avatar_url( + &self, + user_id: &UserId, + avatar_url: Option, + ) -> Result<()> { if let Some(avatar_url) = avatar_url { - self.userid_avatarurl - .insert(user_id.as_bytes(), avatar_url.to_string().as_bytes())?; + self.userid_avatarurl.insert( + user_id.as_bytes(), + avatar_url.to_string().as_bytes(), + )?; } else { self.userid_avatarurl.remove(user_id.as_bytes())?; } @@ -165,21 +213,26 @@ impl service::users::Data for KeyValueDatabase { Ok(()) } - /// Get the blurhash of a user. + /// Get the `blurhash` of a user. fn blurhash(&self, user_id: &UserId) -> Result> { self.userid_blurhash .get(user_id.as_bytes())? .map(|bytes| { - let s = utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?; + let s = utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Avatar URL in db is invalid.") + })?; Ok(s) }) .transpose() } - /// Sets a new avatar_url or removes it if avatar_url is None. - fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()> { + /// Sets a new `avatar_url` or removes it if `avatar_url` is `None`. 
+ fn set_blurhash( + &self, + user_id: &UserId, + blurhash: Option, + ) -> Result<()> { if let Some(blurhash) = blurhash { self.userid_blurhash .insert(user_id.as_bytes(), blurhash.as_bytes())?; @@ -198,22 +251,24 @@ impl service::users::Data for KeyValueDatabase { token: &str, initial_device_display_name: Option, ) -> Result<()> { - // This method should never be called for nonexistent users. - assert!(self.exists(user_id)?); + assert!( + self.exists(user_id)?, + "user must exist before calling this method" + ); let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); + userdeviceid.push(0xFF); userdeviceid.extend_from_slice(device_id.as_bytes()); - self.userid_devicelistversion - .increment(user_id.as_bytes())?; + self.userid_devicelistversion.increment(user_id.as_bytes())?; self.userdeviceid_metadata.insert( &userdeviceid, &serde_json::to_vec(&Device { device_id: device_id.into(), display_name: initial_device_display_name, - last_seen_ip: None, // TODO + // TODO + last_seen_ip: None, last_seen_ts: Some(MilliSecondsSinceUnixEpoch::now()), }) .expect("Device::to_string never fails."), @@ -225,9 +280,13 @@ impl service::users::Data for KeyValueDatabase { } /// Removes a device from a user. 
- fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { + fn remove_device( + &self, + user_id: &UserId, + device_id: &DeviceId, + ) -> Result<()> { let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); + userdeviceid.push(0xFF); userdeviceid.extend_from_slice(device_id.as_bytes()); // Remove tokens @@ -238,7 +297,7 @@ impl service::users::Data for KeyValueDatabase { // Remove todevice events let mut prefix = userdeviceid.clone(); - prefix.push(0xff); + prefix.push(0xFF); for (key, _) in self.todeviceid_events.scan_prefix(prefix) { self.todeviceid_events.remove(&key)?; @@ -246,8 +305,7 @@ impl service::users::Data for KeyValueDatabase { // TODO: Remove onetimekeys - self.userid_devicelistversion - .increment(user_id.as_bytes())?; + self.userid_devicelistversion.increment(user_id.as_bytes())?; self.userdeviceid_metadata.remove(&userdeviceid)?; @@ -260,33 +318,40 @@ impl service::users::Data for KeyValueDatabase { user_id: &UserId, ) -> Box> + 'a> { let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); // All devices have metadata - Box::new( - self.userdeviceid_metadata - .scan_prefix(prefix) - .map(|(bytes, _)| { - Ok(utils::string_from_bytes( - bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| { - Error::bad_database("UserDevice ID in db is invalid.") - })?, + Box::new(self.userdeviceid_metadata.scan_prefix(prefix).map( + |(bytes, _)| { + Ok(utils::string_from_bytes( + bytes.rsplit(|&b| b == 0xFF).next().ok_or_else(|| { + Error::bad_database("UserDevice ID in db is invalid.") + })?, + ) + .map_err(|_| { + Error::bad_database( + "Device ID in userdeviceid_metadata is invalid.", ) - .map_err(|_| { - Error::bad_database("Device ID in userdeviceid_metadata is invalid.") - })? - .into()) - }), - ) + })? + .into()) + }, + )) } /// Replaces the access token of one device. 
- fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { + fn set_token( + &self, + user_id: &UserId, + device_id: &DeviceId, + token: &str, + ) -> Result<()> { let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); + userdeviceid.push(0xFF); userdeviceid.extend_from_slice(device_id.as_bytes()); - // All devices have metadata - assert!(self.userdeviceid_metadata.get(&userdeviceid)?.is_some()); + assert!( + self.userdeviceid_metadata.get(&userdeviceid)?.is_some(), + "devices should have metadata" + ); // Remove old token if let Some(old_token) = self.userdeviceid_token.get(&userdeviceid)? { @@ -295,10 +360,8 @@ impl service::users::Data for KeyValueDatabase { } // Assign token to user device combination - self.userdeviceid_token - .insert(&userdeviceid, token.as_bytes())?; - self.token_userdeviceid - .insert(token.as_bytes(), &userdeviceid)?; + self.userdeviceid_token.insert(&userdeviceid, token.as_bytes())?; + self.token_userdeviceid.insert(token.as_bytes(), &userdeviceid)?; Ok(()) } @@ -307,29 +370,33 @@ impl service::users::Data for KeyValueDatabase { &self, user_id: &UserId, device_id: &DeviceId, - one_time_key_key: &DeviceKeyId, + one_time_key_key: &OwnedKeyId, one_time_key_value: &Raw, ) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(device_id.as_bytes()); - // All devices have metadata - // Only existing devices should be able to call this. 
- assert!(self.userdeviceid_metadata.get(&key)?.is_some()); + assert!( + self.userdeviceid_metadata.get(&key)?.is_some(), + "devices should have metadata and this method should only be \ + called with existing devices" + ); - key.push(0xff); - // TODO: Use DeviceKeyId::to_string when it's available (and update everything, - // because there are no wrapping quotation marks anymore) + key.push(0xFF); + // TODO: Use DeviceKeyId::to_string when it's available (and update + // everything, because there are no wrapping quotation marks + // anymore) key.extend_from_slice( serde_json::to_string(one_time_key_key) - .expect("DeviceKeyId::to_string always works") + .expect("OwnedKeyId::to_string always works") .as_bytes(), ); self.onetimekeyid_onetimekeys.insert( &key, - &serde_json::to_vec(&one_time_key_value).expect("OneTimeKey::to_vec always works"), + &serde_json::to_vec(&one_time_key_value) + .expect("OneTimeKey::to_vec always works"), )?; self.userid_lastonetimekeyupdate.insert( @@ -340,28 +407,18 @@ impl service::users::Data for KeyValueDatabase { Ok(()) } - fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { - self.userid_lastonetimekeyupdate - .get(user_id.as_bytes())? 
- .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - }) - }) - .unwrap_or(Ok(0)) - } - fn take_one_time_key( &self, user_id: &UserId, device_id: &DeviceId, - key_algorithm: &DeviceKeyAlgorithm, - ) -> Result)>> { + key_algorithm: &OneTimeKeyAlgorithm, + ) -> Result)>> { let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.push(b'"'); // Annoying quotation mark + prefix.push(0xFF); + // Annoying quotation mark + prefix.push(b'"'); prefix.extend_from_slice(key_algorithm.as_ref().as_bytes()); prefix.push(b':'); @@ -378,13 +435,18 @@ impl service::users::Data for KeyValueDatabase { Ok(( serde_json::from_slice( - key.rsplit(|&b| b == 0xff) - .next() - .ok_or_else(|| Error::bad_database("OneTimeKeyId in db is invalid."))?, + key.rsplit(|&b| b == 0xFF).next().ok_or_else(|| { + Error::bad_database( + "OneTimeKeyId in db is invalid.", + ) + })?, ) - .map_err(|_| Error::bad_database("OneTimeKeyId in db is invalid."))?, - serde_json::from_slice(&value) - .map_err(|_| Error::bad_database("OneTimeKeys in db are invalid."))?, + .map_err(|_| { + Error::bad_database("OneTimeKeyId in db is invalid.") + })?, + serde_json::from_slice(&value).map_err(|_| { + Error::bad_database("OneTimeKeys in db are invalid.") + })?, )) }) .transpose() @@ -394,27 +456,33 @@ impl service::users::Data for KeyValueDatabase { &self, user_id: &UserId, device_id: &DeviceId, - ) -> Result> { + ) -> Result> { let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); + userdeviceid.push(0xFF); userdeviceid.extend_from_slice(device_id.as_bytes()); let mut counts = BTreeMap::new(); - for algorithm in - self.onetimekeyid_onetimekeys - .scan_prefix(userdeviceid) - .map(|(bytes, _)| { - Ok::<_, Error>( - serde_json::from_slice::( - bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| { - 
Error::bad_database("OneTimeKey ID in db is invalid.") - })?, - ) - .map_err(|_| Error::bad_database("DeviceKeyId in db is invalid."))? - .algorithm(), + for algorithm in self + .onetimekeyid_onetimekeys + .scan_prefix(userdeviceid) + .map(|(bytes, _)| { + Ok::<_, Error>( + serde_json::from_slice::( + bytes.rsplit(|&b| b == 0xFF).next().ok_or_else( + || { + Error::bad_database( + "OneTimeKey ID in db is invalid.", + ) + }, + )?, ) - }) + .map_err(|_| { + Error::bad_database("DeviceKeyId in db is invalid.") + })? + .algorithm(), + ) + }) { *counts.entry(algorithm?).or_default() += UInt::from(1_u32); } @@ -422,6 +490,7 @@ impl service::users::Data for KeyValueDatabase { Ok(counts) } + #[tracing::instrument(skip(self, device_keys))] fn add_device_keys( &self, user_id: &UserId, @@ -429,12 +498,13 @@ impl service::users::Data for KeyValueDatabase { device_keys: &Raw, ) -> Result<()> { let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); + userdeviceid.push(0xFF); userdeviceid.extend_from_slice(device_id.as_bytes()); self.keyid_key.insert( &userdeviceid, - &serde_json::to_vec(&device_keys).expect("DeviceKeys::to_vec always works"), + &serde_json::to_vec(&device_keys) + .expect("DeviceKeys::to_vec always works"), )?; self.mark_device_key_update(user_id)?; @@ -442,40 +512,49 @@ impl service::users::Data for KeyValueDatabase { Ok(()) } + #[tracing::instrument(skip( + self, + master_key, + self_signing_key, + user_signing_key + ))] fn add_cross_signing_keys( &self, user_id: &UserId, master_key: &Raw, - self_signing_key: &Option>, - user_signing_key: &Option>, + self_signing_key: Option<&Raw>, + user_signing_key: Option<&Raw>, notify: bool, ) -> Result<()> { // TODO: Check signatures let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); let (master_key_key, _) = self.parse_master_key(user_id, master_key)?; self.keyid_key .insert(&master_key_key, master_key.json().get().as_bytes())?; - self.userid_masterkeyid - 
.insert(user_id.as_bytes(), &master_key_key)?; + self.userid_masterkeyid.insert(user_id.as_bytes(), &master_key_key)?; // Self-signing key if let Some(self_signing_key) = self_signing_key { let mut self_signing_key_ids = self_signing_key .deserialize() .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Invalid self signing key") + Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid self signing key", + ) })? .keys .into_values(); - let self_signing_key_id = self_signing_key_ids.next().ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Self signing key contained no key.", - ))?; + let self_signing_key_id = + self_signing_key_ids.next().ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Self signing key contained no key.", + ))?; if self_signing_key_ids.next().is_some() { return Err(Error::BadRequest( @@ -485,7 +564,8 @@ impl service::users::Data for KeyValueDatabase { } let mut self_signing_key_key = prefix.clone(); - self_signing_key_key.extend_from_slice(self_signing_key_id.as_bytes()); + self_signing_key_key + .extend_from_slice(self_signing_key_id.as_bytes()); self.keyid_key.insert( &self_signing_key_key, @@ -501,15 +581,19 @@ impl service::users::Data for KeyValueDatabase { let mut user_signing_key_ids = user_signing_key .deserialize() .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Invalid user signing key") + Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid user signing key", + ) })? 
.keys .into_values(); - let user_signing_key_id = user_signing_key_ids.next().ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "User signing key contained no key.", - ))?; + let user_signing_key_id = + user_signing_key_ids.next().ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "User signing key contained no key.", + ))?; if user_signing_key_ids.next().is_some() { return Err(Error::BadRequest( @@ -519,7 +603,8 @@ impl service::users::Data for KeyValueDatabase { } let mut user_signing_key_key = prefix; - user_signing_key_key.extend_from_slice(user_signing_key_id.as_bytes()); + user_signing_key_key + .extend_from_slice(user_signing_key_id.as_bytes()); self.keyid_key.insert( &user_signing_key_key, @@ -545,32 +630,44 @@ impl service::users::Data for KeyValueDatabase { sender_id: &UserId, ) -> Result<()> { let mut key = target_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(key_id.as_bytes()); - let mut cross_signing_key: serde_json::Value = - serde_json::from_slice(&self.keyid_key.get(&key)?.ok_or(Error::BadRequest( + let mut cross_signing_key: serde_json::Value = serde_json::from_slice( + &self.keyid_key.get(&key)?.ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Tried to sign nonexistent key.", - ))?) - .map_err(|_| Error::bad_database("key in keyid_key is invalid."))?; + ))?, + ) + .map_err(|_| Error::bad_database("key in keyid_key is invalid."))?; let signatures = cross_signing_key .get_mut("signatures") - .ok_or_else(|| Error::bad_database("key in keyid_key has no signatures field."))? + .ok_or_else(|| { + Error::bad_database("key in keyid_key has no signatures field.") + })? .as_object_mut() - .ok_or_else(|| Error::bad_database("key in keyid_key has invalid signatures field."))? + .ok_or_else(|| { + Error::bad_database( + "key in keyid_key has invalid signatures field.", + ) + })? 
.entry(sender_id.to_string()) .or_insert_with(|| serde_json::Map::new().into()); signatures .as_object_mut() - .ok_or_else(|| Error::bad_database("signatures in keyid_key for a user is invalid."))? + .ok_or_else(|| { + Error::bad_database( + "signatures in keyid_key for a user is invalid.", + ) + })? .insert(signature.0, signature.1.into()); self.keyid_key.insert( &key, - &serde_json::to_vec(&cross_signing_key).expect("CrossSigningKey::to_vec always works"), + &serde_json::to_vec(&cross_signing_key) + .expect("CrossSigningKey::to_vec always works"), )?; self.mark_device_key_update(target_id)?; @@ -585,7 +682,7 @@ impl service::users::Data for KeyValueDatabase { to: Option, ) -> Box> + 'a> { let mut prefix = user_or_room_id.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); let mut start = prefix.clone(); start.extend_from_slice(&(from + 1).to_be_bytes()); @@ -597,38 +694,52 @@ impl service::users::Data for KeyValueDatabase { .iter_from(&start, false) .take_while(move |(k, _)| { k.starts_with(&prefix) - && if let Some(current) = k.splitn(2, |&b| b == 0xff).nth(1) { + && if let Some(current) = + k.splitn(2, |&b| b == 0xFF).nth(1) + { if let Ok(c) = utils::u64_from_bytes(current) { c <= to } else { - warn!("BadDatabase: Could not parse keychangeid_userid bytes"); + warn!( + "BadDatabase: Could not parse \ + keychangeid_userid bytes" + ); false } } else { - warn!("BadDatabase: Could not parse keychangeid_userid"); + warn!( + "BadDatabase: Could not parse \ + keychangeid_userid" + ); false } }) .map(|(_, bytes)| { - UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "User ID in devicekeychangeid_userid is invalid unicode.", - ) - })?) + UserId::parse(utils::string_from_bytes(&bytes).map_err( + |_| { + Error::bad_database( + "User ID in devicekeychangeid_userid is \ + invalid unicode.", + ) + }, + )?) 
.map_err(|_| { - Error::bad_database("User ID in devicekeychangeid_userid is invalid.") + Error::bad_database( + "User ID in devicekeychangeid_userid is invalid.", + ) }) }), ) } + #[tracing::instrument(skip(self))] fn mark_device_key_update(&self, user_id: &UserId) -> Result<()> { let count = services().globals.next_count()?.to_be_bytes(); for room_id in services() .rooms .state_cache .rooms_joined(user_id) - .filter_map(|r| r.ok()) + .filter_map(Result::ok) { // Don't send key updates to unencrypted rooms if services() @@ -641,14 +752,14 @@ impl service::users::Data for KeyValueDatabase { } let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(&count); self.keychangeid_userid.insert(&key, user_id.as_bytes())?; } let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(&count); self.keychangeid_userid.insert(&key, user_id.as_bytes())?; @@ -661,7 +772,7 @@ impl service::users::Data for KeyValueDatabase { device_id: &DeviceId, ) -> Result>> { let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(device_id.as_bytes()); self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { @@ -677,11 +788,11 @@ impl service::users::Data for KeyValueDatabase { master_key: &Raw, ) -> Result<(Vec, CrossSigningKey)> { let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); - let master_key = master_key - .deserialize() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid master key"))?; + let master_key = master_key.deserialize().map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Invalid master key") + })?; let mut master_key_ids = master_key.keys.values(); let master_key_id = master_key_ids.next().ok_or(Error::BadRequest( ErrorKind::InvalidParam, @@ -706,8 +817,12 @@ impl service::users::Data for KeyValueDatabase { allowed_signatures: &dyn Fn(&UserId) -> bool, ) -> Result>> { 
self.keyid_key.get(key)?.map_or(Ok(None), |bytes| { - let mut cross_signing_key = serde_json::from_slice::(&bytes) - .map_err(|_| Error::bad_database("CrossSigningKey in db is invalid."))?; + let mut cross_signing_key = serde_json::from_slice::< + serde_json::Value, + >(&bytes) + .map_err(|_| { + Error::bad_database("CrossSigningKey in db is invalid.") + })?; clean_signatures( &mut cross_signing_key, sender_user, @@ -748,16 +863,20 @@ impl service::users::Data for KeyValueDatabase { }) } - fn get_user_signing_key(&self, user_id: &UserId) -> Result>> { - self.userid_usersigningkeyid - .get(user_id.as_bytes())? - .map_or(Ok(None), |key| { + fn get_user_signing_key( + &self, + user_id: &UserId, + ) -> Result>> { + self.userid_usersigningkeyid.get(user_id.as_bytes())?.map_or( + Ok(None), + |key| { self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { Ok(Some(serde_json::from_slice(&bytes).map_err(|_| { Error::bad_database("CrossSigningKey in db is invalid.") })?)) }) - }) + }, + ) } fn add_to_device_event( @@ -769,9 +888,9 @@ impl service::users::Data for KeyValueDatabase { content: serde_json::Value, ) -> Result<()> { let mut key = target_user_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(target_device_id.as_bytes()); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); let mut json = serde_json::Map::new(); @@ -779,7 +898,8 @@ impl service::users::Data for KeyValueDatabase { json.insert("sender".to_owned(), sender.to_string().into()); json.insert("content".to_owned(), content); - let value = serde_json::to_vec(&json).expect("Map::to_vec always works"); + let value = + serde_json::to_vec(&json).expect("Map::to_vec always works"); self.todeviceid_events.insert(&key, &value)?; @@ -794,15 +914,14 @@ impl service::users::Data for KeyValueDatabase { let mut events = Vec::new(); let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); 
prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); + prefix.push(0xFF); for (_, value) in self.todeviceid_events.scan_prefix(prefix) { - events.push( - serde_json::from_slice(&value) - .map_err(|_| Error::bad_database("Event in todeviceid_events is invalid."))?, - ); + events.push(serde_json::from_slice(&value).map_err(|_| { + Error::bad_database("Event in todeviceid_events is invalid.") + })?); } Ok(events) @@ -815,25 +934,32 @@ impl service::users::Data for KeyValueDatabase { until: u64, ) -> Result<()> { let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); + prefix.push(0xFF); prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); + prefix.push(0xFF); let mut last = prefix.clone(); last.extend_from_slice(&until.to_be_bytes()); + // Include last for (key, _) in self .todeviceid_events - .iter_from(&last, true) // this includes last + .iter_from(&last, true) .take_while(move |(k, _)| k.starts_with(&prefix)) .map(|(key, _)| { Ok::<_, Error>(( key.clone(), - utils::u64_from_bytes(&key[key.len() - size_of::()..key.len()]) - .map_err(|_| Error::bad_database("ToDeviceId has invalid count bytes."))?, + utils::u64_from_bytes( + &key[key.len() - size_of::()..key.len()], + ) + .map_err(|_| { + Error::bad_database( + "ToDeviceId has invalid count bytes.", + ) + })?, )) }) - .filter_map(|r| r.ok()) + .filter_map(Result::ok) .take_while(|&(_, count)| count <= until) { self.todeviceid_events.remove(&key)?; @@ -849,18 +975,20 @@ impl service::users::Data for KeyValueDatabase { device: &Device, ) -> Result<()> { let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); + userdeviceid.push(0xFF); userdeviceid.extend_from_slice(device_id.as_bytes()); - // Only existing devices should be able to call this. 
- assert!(self.userdeviceid_metadata.get(&userdeviceid)?.is_some()); + assert!( + self.userdeviceid_metadata.get(&userdeviceid)?.is_some(), + "this method should only be called with existing devices" + ); - self.userid_devicelistversion - .increment(user_id.as_bytes())?; + self.userid_devicelistversion.increment(user_id.as_bytes())?; self.userdeviceid_metadata.insert( &userdeviceid, - &serde_json::to_vec(device).expect("Device::to_string always works"), + &serde_json::to_vec(device) + .expect("Device::to_string always works"), )?; Ok(()) @@ -873,26 +1001,32 @@ impl service::users::Data for KeyValueDatabase { device_id: &DeviceId, ) -> Result> { let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); + userdeviceid.push(0xFF); userdeviceid.extend_from_slice(device_id.as_bytes()); - self.userdeviceid_metadata - .get(&userdeviceid)? - .map_or(Ok(None), |bytes| { + self.userdeviceid_metadata.get(&userdeviceid)?.map_or( + Ok(None), + |bytes| { Ok(Some(serde_json::from_slice(&bytes).map_err(|_| { - Error::bad_database("Metadata in userdeviceid_metadata is invalid.") + Error::bad_database( + "Metadata in userdeviceid_metadata is invalid.", + ) })?)) - }) + }, + ) } fn get_devicelist_version(&self, user_id: &UserId) -> Result> { - self.userid_devicelistversion - .get(user_id.as_bytes())? 
- .map_or(Ok(None), |bytes| { + self.userid_devicelistversion.get(user_id.as_bytes())?.map_or( + Ok(None), + |bytes| { utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid devicelistversion in db.")) + .map_err(|_| { + Error::bad_database("Invalid devicelistversion in db.") + }) .map(Some) - }) + }, + ) } fn all_devices_metadata<'a>( @@ -900,25 +1034,29 @@ impl service::users::Data for KeyValueDatabase { user_id: &UserId, ) -> Box> + 'a> { let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); - Box::new( - self.userdeviceid_metadata - .scan_prefix(key) - .map(|(_, bytes)| { - serde_json::from_slice::(&bytes).map_err(|_| { - Error::bad_database("Device in userdeviceid_metadata is invalid.") - }) - }), - ) + Box::new(self.userdeviceid_metadata.scan_prefix(key).map( + |(_, bytes)| { + serde_json::from_slice::(&bytes).map_err(|_| { + Error::bad_database( + "Device in userdeviceid_metadata is invalid.", + ) + }) + }, + )) } /// Creates a new sync filter. Returns the filter id. 
- fn create_filter(&self, user_id: &UserId, filter: &FilterDefinition) -> Result { + fn create_filter( + &self, + user_id: &UserId, + filter: &FilterDefinition, + ) -> Result { let filter_id = utils::random_string(4); let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(filter_id.as_bytes()); self.userfilterid_filter.insert( @@ -929,9 +1067,13 @@ impl service::users::Data for KeyValueDatabase { Ok(filter_id) } - fn get_filter(&self, user_id: &UserId, filter_id: &str) -> Result> { + fn get_filter( + &self, + user_id: &UserId, + filter_id: &str, + ) -> Result> { let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); + key.push(0xFF); key.extend_from_slice(filter_id.as_bytes()); let raw = self.userfilterid_filter.get(&key)?; @@ -945,13 +1087,14 @@ impl service::users::Data for KeyValueDatabase { } } -impl KeyValueDatabase {} - /// Will only return with Some(username) if the password was not empty and the /// username could be successfully parsed. -/// If utils::string_from_bytes(...) returns an error that username will be skipped -/// and the error will be logged. -fn get_username_with_valid_password(username: &[u8], password: &[u8]) -> Option { +/// If [`utils::string_from_bytes`] returns an error that username will be +/// skipped and the error will be logged. 
+fn get_username_with_valid_password( + username: &[u8], + password: &[u8], +) -> Option { // A valid password is not empty if password.is_empty() { None @@ -960,7 +1103,8 @@ fn get_username_with_valid_password(username: &[u8], password: &[u8]) -> Option< Ok(u) => Some(u), Err(e) => { warn!( - "Failed to parse username while calling get_local_users(): {}", + "Failed to parse username while calling \ + get_local_users(): {}", e.to_string() ); None diff --git a/src/database/mod.rs b/src/database/mod.rs deleted file mode 100644 index 41da857c..00000000 --- a/src/database/mod.rs +++ /dev/null @@ -1,1129 +0,0 @@ -pub mod abstraction; -pub mod key_value; - -use crate::{ - service::rooms::timeline::PduCount, services, utils, Config, Error, PduEvent, Result, Services, - SERVICES, -}; -use abstraction::{KeyValueDatabaseEngine, KvTree}; -use directories::ProjectDirs; -use lru_cache::LruCache; - -use ruma::{ - events::{ - push_rules::{PushRulesEvent, PushRulesEventContent}, - room::message::RoomMessageEventContent, - GlobalAccountDataEvent, GlobalAccountDataEventType, StateEventType, - }, - push::Ruleset, - CanonicalJsonValue, EventId, OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, - UserId, -}; -use serde::Deserialize; -use std::{ - collections::{BTreeMap, HashMap, HashSet}, - fs::{self, remove_dir_all}, - io::Write, - mem::size_of, - path::Path, - sync::{Arc, Mutex, RwLock}, - time::Duration, -}; -use tokio::time::interval; - -use tracing::{debug, error, info, warn}; - -pub struct KeyValueDatabase { - _db: Arc, - - //pub globals: globals::Globals, - pub(super) global: Arc, - pub(super) server_signingkeys: Arc, - - //pub users: users::Users, - pub(super) userid_password: Arc, - pub(super) userid_displayname: Arc, - pub(super) userid_avatarurl: Arc, - pub(super) userid_blurhash: Arc, - pub(super) userdeviceid_token: Arc, - pub(super) userdeviceid_metadata: Arc, // This is also used to check if a device exists - pub(super) userid_devicelistversion: Arc, // 
DevicelistVersion = u64 - pub(super) token_userdeviceid: Arc, - - pub(super) onetimekeyid_onetimekeys: Arc, // OneTimeKeyId = UserId + DeviceKeyId - pub(super) userid_lastonetimekeyupdate: Arc, // LastOneTimeKeyUpdate = Count - pub(super) keychangeid_userid: Arc, // KeyChangeId = UserId/RoomId + Count - pub(super) keyid_key: Arc, // KeyId = UserId + KeyId (depends on key type) - pub(super) userid_masterkeyid: Arc, - pub(super) userid_selfsigningkeyid: Arc, - pub(super) userid_usersigningkeyid: Arc, - - pub(super) userfilterid_filter: Arc, // UserFilterId = UserId + FilterId - - pub(super) todeviceid_events: Arc, // ToDeviceId = UserId + DeviceId + Count - - //pub uiaa: uiaa::Uiaa, - pub(super) userdevicesessionid_uiaainfo: Arc, // User-interactive authentication - pub(super) userdevicesessionid_uiaarequest: - RwLock>, - - //pub edus: RoomEdus, - pub(super) readreceiptid_readreceipt: Arc, // ReadReceiptId = RoomId + Count + UserId - pub(super) roomuserid_privateread: Arc, // RoomUserId = Room + User, PrivateRead = Count - pub(super) roomuserid_lastprivatereadupdate: Arc, // LastPrivateReadUpdate = Count - pub(super) presenceid_presence: Arc, // PresenceId = RoomId + Count + UserId - pub(super) userid_lastpresenceupdate: Arc, // LastPresenceUpdate = Count - - //pub rooms: rooms::Rooms, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) threadid_userids: Arc, // ThreadId = RoomId + Count - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. 
- pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - pub(super) roomuserid_lastnotificationread: Arc, // LastNotificationRead = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// ShortEventId + ShortEventId -> (). 
- pub(super) tofrom_relation: Arc, - /// RoomId + EventId -> Parent PDU EventId. - pub(super) referencedevents: Arc, - - //pub account_data: account_data::AccountData, - pub(super) roomuserdataid_accountdata: Arc, // RoomUserDataId = Room + User + Count + Type - pub(super) roomusertype_roomuserdataid: Arc, // RoomUserType = Room + User + Type - - //pub media: media::Media, - pub(super) mediaid_file: Arc, // MediaId = MXC + WidthHeight + ContentDisposition + ContentType - //pub key_backups: key_backups::KeyBackups, - pub(super) backupid_algorithm: Arc, // BackupId = UserId + Version(Count) - pub(super) backupid_etag: Arc, // BackupId = UserId + Version(Count) - pub(super) backupkeyid_backup: Arc, // BackupKeyId = UserId + Version + RoomId + SessionId - - //pub transaction_ids: transaction_ids::TransactionIds, - pub(super) userdevicetxnid_response: Arc, // Response can be empty (/sendToDevice) or the event id (/send) - //pub sending: sending::Sending, - pub(super) servername_educount: Arc, // EduCount: Count of last EDU sync - pub(super) servernameevent_data: Arc, // ServernameEvent = (+ / $)SenderKey / ServerName / UserId + PduId / Id (for edus), Data = EDU content - pub(super) servercurrentevent_data: Arc, // ServerCurrentEvents = (+ / $)ServerName / UserId + PduId / Id (for edus), Data = EDU content - - //pub appservice: appservice::Appservice, - pub(super) id_appserviceregistrations: Arc, - - //pub pusher: pusher::PushData, - pub(super) senderkey_pusher: Arc, - - pub(super) pdu_cache: Mutex>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock>>>, - pub(super) appservice_in_room_cache: RwLock>>, - pub(super) lasttimelinecount_cache: Mutex>, -} - -impl KeyValueDatabase { - /// Tries to remove the old database but ignores all errors. 
- pub fn try_remove(server_name: &str) -> Result<()> { - let mut path = ProjectDirs::from("xyz", "koesters", "conduit") - .ok_or_else(|| Error::bad_config("The OS didn't return a valid home directory path."))? - .data_dir() - .to_path_buf(); - path.push(server_name); - let _ = remove_dir_all(path); - - Ok(()) - } - - fn check_db_setup(config: &Config) -> Result<()> { - let path = Path::new(&config.database_path); - - let sled_exists = path.join("db").exists(); - let sqlite_exists = path.join("conduit.db").exists(); - let rocksdb_exists = path.join("IDENTITY").exists(); - - let mut count = 0; - - if sled_exists { - count += 1; - } - - if sqlite_exists { - count += 1; - } - - if rocksdb_exists { - count += 1; - } - - if count > 1 { - warn!("Multiple databases at database_path detected"); - return Ok(()); - } - - if sled_exists && config.database_backend != "sled" { - return Err(Error::bad_config( - "Found sled at database_path, but is not specified in config.", - )); - } - - if sqlite_exists && config.database_backend != "sqlite" { - return Err(Error::bad_config( - "Found sqlite at database_path, but is not specified in config.", - )); - } - - if rocksdb_exists && config.database_backend != "rocksdb" { - return Err(Error::bad_config( - "Found rocksdb at database_path, but is not specified in config.", - )); - } - - Ok(()) - } - - /// Load an existing database or create a new one. - pub async fn load_or_create(config: Config) -> Result<()> { - Self::check_db_setup(&config)?; - - if !Path::new(&config.database_path).exists() { - std::fs::create_dir_all(&config.database_path) - .map_err(|_| Error::BadConfig("Database folder doesn't exists and couldn't be created (e.g. due to missing permissions). 
Please create the database folder yourself."))?; - } - - let builder: Arc = match &*config.database_backend { - "sqlite" => { - #[cfg(not(feature = "sqlite"))] - return Err(Error::BadConfig("Database backend not found.")); - #[cfg(feature = "sqlite")] - Arc::new(Arc::::open(&config)?) - } - "rocksdb" => { - #[cfg(not(feature = "rocksdb"))] - return Err(Error::BadConfig("Database backend not found.")); - #[cfg(feature = "rocksdb")] - Arc::new(Arc::::open(&config)?) - } - "persy" => { - #[cfg(not(feature = "persy"))] - return Err(Error::BadConfig("Database backend not found.")); - #[cfg(feature = "persy")] - Arc::new(Arc::::open(&config)?) - } - _ => { - return Err(Error::BadConfig("Database backend not found.")); - } - }; - - if config.registration_token == Some(String::new()) { - return Err(Error::bad_config("Registration token is empty")); - } - - if config.max_request_size < 1024 { - error!(?config.max_request_size, "Max request size is less than 1KB. Please increase it."); - } - - let db_raw = Box::new(Self { - _db: builder.clone(), - userid_password: builder.open_tree("userid_password")?, - userid_displayname: builder.open_tree("userid_displayname")?, - userid_avatarurl: builder.open_tree("userid_avatarurl")?, - userid_blurhash: builder.open_tree("userid_blurhash")?, - userdeviceid_token: builder.open_tree("userdeviceid_token")?, - userdeviceid_metadata: builder.open_tree("userdeviceid_metadata")?, - userid_devicelistversion: builder.open_tree("userid_devicelistversion")?, - token_userdeviceid: builder.open_tree("token_userdeviceid")?, - onetimekeyid_onetimekeys: builder.open_tree("onetimekeyid_onetimekeys")?, - userid_lastonetimekeyupdate: builder.open_tree("userid_lastonetimekeyupdate")?, - keychangeid_userid: builder.open_tree("keychangeid_userid")?, - keyid_key: builder.open_tree("keyid_key")?, - userid_masterkeyid: builder.open_tree("userid_masterkeyid")?, - userid_selfsigningkeyid: builder.open_tree("userid_selfsigningkeyid")?, - userid_usersigningkeyid: 
builder.open_tree("userid_usersigningkeyid")?, - userfilterid_filter: builder.open_tree("userfilterid_filter")?, - todeviceid_events: builder.open_tree("todeviceid_events")?, - - userdevicesessionid_uiaainfo: builder.open_tree("userdevicesessionid_uiaainfo")?, - userdevicesessionid_uiaarequest: RwLock::new(BTreeMap::new()), - readreceiptid_readreceipt: builder.open_tree("readreceiptid_readreceipt")?, - roomuserid_privateread: builder.open_tree("roomuserid_privateread")?, // "Private" read receipt - roomuserid_lastprivatereadupdate: builder - .open_tree("roomuserid_lastprivatereadupdate")?, - presenceid_presence: builder.open_tree("presenceid_presence")?, - userid_lastpresenceupdate: builder.open_tree("userid_lastpresenceupdate")?, - pduid_pdu: builder.open_tree("pduid_pdu")?, - eventid_pduid: builder.open_tree("eventid_pduid")?, - roomid_pduleaves: builder.open_tree("roomid_pduleaves")?, - - alias_roomid: builder.open_tree("alias_roomid")?, - aliasid_alias: builder.open_tree("aliasid_alias")?, - publicroomids: builder.open_tree("publicroomids")?, - - threadid_userids: builder.open_tree("threadid_userids")?, - - tokenids: builder.open_tree("tokenids")?, - - roomserverids: builder.open_tree("roomserverids")?, - serverroomids: builder.open_tree("serverroomids")?, - userroomid_joined: builder.open_tree("userroomid_joined")?, - roomuserid_joined: builder.open_tree("roomuserid_joined")?, - roomid_joinedcount: builder.open_tree("roomid_joinedcount")?, - roomid_invitedcount: builder.open_tree("roomid_invitedcount")?, - roomuseroncejoinedids: builder.open_tree("roomuseroncejoinedids")?, - userroomid_invitestate: builder.open_tree("userroomid_invitestate")?, - roomuserid_invitecount: builder.open_tree("roomuserid_invitecount")?, - userroomid_leftstate: builder.open_tree("userroomid_leftstate")?, - roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?, - - disabledroomids: builder.open_tree("disabledroomids")?, - - lazyloadedids: 
builder.open_tree("lazyloadedids")?, - - userroomid_notificationcount: builder.open_tree("userroomid_notificationcount")?, - userroomid_highlightcount: builder.open_tree("userroomid_highlightcount")?, - roomuserid_lastnotificationread: builder.open_tree("userroomid_highlightcount")?, - - statekey_shortstatekey: builder.open_tree("statekey_shortstatekey")?, - shortstatekey_statekey: builder.open_tree("shortstatekey_statekey")?, - - shorteventid_authchain: builder.open_tree("shorteventid_authchain")?, - - roomid_shortroomid: builder.open_tree("roomid_shortroomid")?, - - shortstatehash_statediff: builder.open_tree("shortstatehash_statediff")?, - eventid_shorteventid: builder.open_tree("eventid_shorteventid")?, - shorteventid_eventid: builder.open_tree("shorteventid_eventid")?, - shorteventid_shortstatehash: builder.open_tree("shorteventid_shortstatehash")?, - roomid_shortstatehash: builder.open_tree("roomid_shortstatehash")?, - roomsynctoken_shortstatehash: builder.open_tree("roomsynctoken_shortstatehash")?, - statehash_shortstatehash: builder.open_tree("statehash_shortstatehash")?, - - eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?, - softfailedeventids: builder.open_tree("softfailedeventids")?, - - tofrom_relation: builder.open_tree("tofrom_relation")?, - referencedevents: builder.open_tree("referencedevents")?, - roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, - roomusertype_roomuserdataid: builder.open_tree("roomusertype_roomuserdataid")?, - mediaid_file: builder.open_tree("mediaid_file")?, - backupid_algorithm: builder.open_tree("backupid_algorithm")?, - backupid_etag: builder.open_tree("backupid_etag")?, - backupkeyid_backup: builder.open_tree("backupkeyid_backup")?, - userdevicetxnid_response: builder.open_tree("userdevicetxnid_response")?, - servername_educount: builder.open_tree("servername_educount")?, - servernameevent_data: builder.open_tree("servernameevent_data")?, - servercurrentevent_data: 
builder.open_tree("servercurrentevent_data")?, - id_appserviceregistrations: builder.open_tree("id_appserviceregistrations")?, - senderkey_pusher: builder.open_tree("senderkey_pusher")?, - global: builder.open_tree("global")?, - server_signingkeys: builder.open_tree("server_signingkeys")?, - - pdu_cache: Mutex::new(LruCache::new( - config - .pdu_cache_capacity - .try_into() - .expect("pdu cache capacity fits into usize"), - )), - auth_chain_cache: Mutex::new(LruCache::new( - (100_000.0 * config.conduit_cache_capacity_modifier) as usize, - )), - shorteventid_cache: Mutex::new(LruCache::new( - (100_000.0 * config.conduit_cache_capacity_modifier) as usize, - )), - eventidshort_cache: Mutex::new(LruCache::new( - (100_000.0 * config.conduit_cache_capacity_modifier) as usize, - )), - shortstatekey_cache: Mutex::new(LruCache::new( - (100_000.0 * config.conduit_cache_capacity_modifier) as usize, - )), - statekeyshort_cache: Mutex::new(LruCache::new( - (100_000.0 * config.conduit_cache_capacity_modifier) as usize, - )), - our_real_users_cache: RwLock::new(HashMap::new()), - appservice_in_room_cache: RwLock::new(HashMap::new()), - lasttimelinecount_cache: Mutex::new(HashMap::new()), - }); - - let db = Box::leak(db_raw); - - let services_raw = Box::new(Services::build(db, config)?); - - // This is the first and only time we initialize the SERVICE static - *SERVICES.write().unwrap() = Some(Box::leak(services_raw)); - - // Matrix resource ownership is based on the server name; changing it - // requires recreating the database from scratch. - if services().users.count()? > 0 { - let conduit_user = - UserId::parse_with_server_name("conduit", services().globals.server_name()) - .expect("@conduit:server_name is valid"); - - if !services().users.exists(&conduit_user)? 
{ - error!( - "The {} server user does not exist, and the database is not new.", - conduit_user - ); - return Err(Error::bad_database( - "Cannot reuse an existing database after changing the server name, please delete the old one first." - )); - } - } - - // If the database has any data, perform data migrations before starting - let latest_database_version = 13; - - if services().users.count()? > 0 { - // MIGRATIONS - if services().globals.database_version()? < 1 { - for (roomserverid, _) in db.roomserverids.iter() { - let mut parts = roomserverid.split(|&b| b == 0xff); - let room_id = parts.next().expect("split always returns one element"); - let servername = match parts.next() { - Some(s) => s, - None => { - error!("Migration: Invalid roomserverid in db."); - continue; - } - }; - let mut serverroomid = servername.to_vec(); - serverroomid.push(0xff); - serverroomid.extend_from_slice(room_id); - - db.serverroomids.insert(&serverroomid, &[])?; - } - - services().globals.bump_database_version(1)?; - - warn!("Migration: 0 -> 1 finished"); - } - - if services().globals.database_version()? < 2 { - // We accidentally inserted hashed versions of "" into the db instead of just "" - for (userid, password) in db.userid_password.iter() { - let password = utils::string_from_bytes(&password); - - let empty_hashed_password = password.map_or(false, |password| { - argon2::verify_encoded(&password, b"").unwrap_or(false) - }); - - if empty_hashed_password { - db.userid_password.insert(&userid, b"")?; - } - } - - services().globals.bump_database_version(2)?; - - warn!("Migration: 1 -> 2 finished"); - } - - if services().globals.database_version()? 
< 3 { - // Move media to filesystem - for (key, content) in db.mediaid_file.iter() { - if content.is_empty() { - continue; - } - - let path = services().globals.get_media_file(&key); - let mut file = fs::File::create(path)?; - file.write_all(&content)?; - db.mediaid_file.insert(&key, &[])?; - } - - services().globals.bump_database_version(3)?; - - warn!("Migration: 2 -> 3 finished"); - } - - if services().globals.database_version()? < 4 { - // Add federated users to services() as deactivated - for our_user in services().users.iter() { - let our_user = our_user?; - if services().users.is_deactivated(&our_user)? { - continue; - } - for room in services().rooms.state_cache.rooms_joined(&our_user) { - for user in services().rooms.state_cache.room_members(&room?) { - let user = user?; - if user.server_name() != services().globals.server_name() { - info!(?user, "Migration: creating user"); - services().users.create(&user, None)?; - } - } - } - } - - services().globals.bump_database_version(4)?; - - warn!("Migration: 3 -> 4 finished"); - } - - if services().globals.database_version()? < 5 { - // Upgrade user data store - for (roomuserdataid, _) in db.roomuserdataid_accountdata.iter() { - let mut parts = roomuserdataid.split(|&b| b == 0xff); - let room_id = parts.next().unwrap(); - let user_id = parts.next().unwrap(); - let event_type = roomuserdataid.rsplit(|&b| b == 0xff).next().unwrap(); - - let mut key = room_id.to_vec(); - key.push(0xff); - key.extend_from_slice(user_id); - key.push(0xff); - key.extend_from_slice(event_type); - - db.roomusertype_roomuserdataid - .insert(&key, &roomuserdataid)?; - } - - services().globals.bump_database_version(5)?; - - warn!("Migration: 4 -> 5 finished"); - } - - if services().globals.database_version()? 
< 6 { - // Set room member count - for (roomid, _) in db.roomid_shortstatehash.iter() { - let string = utils::string_from_bytes(&roomid).unwrap(); - let room_id = <&RoomId>::try_from(string.as_str()).unwrap(); - services().rooms.state_cache.update_joined_count(room_id)?; - } - - services().globals.bump_database_version(6)?; - - warn!("Migration: 5 -> 6 finished"); - } - - if services().globals.database_version()? < 7 { - // Upgrade state store - let mut last_roomstates: HashMap = HashMap::new(); - let mut current_sstatehash: Option = None; - let mut current_room = None; - let mut current_state = HashSet::new(); - let mut counter = 0; - - let mut handle_state = - |current_sstatehash: u64, - current_room: &RoomId, - current_state: HashSet<_>, - last_roomstates: &mut HashMap<_, _>| { - counter += 1; - let last_roomsstatehash = last_roomstates.get(current_room); - - let states_parents = last_roomsstatehash.map_or_else( - || Ok(Vec::new()), - |&last_roomsstatehash| { - services() - .rooms - .state_compressor - .load_shortstatehash_info(last_roomsstatehash) - }, - )?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew = current_state - .difference(&parent_stateinfo.1) - .copied() - .collect::>(); - - let statediffremoved = parent_stateinfo - .1 - .difference(¤t_state) - .copied() - .collect::>(); - - (statediffnew, statediffremoved) - } else { - (current_state, HashSet::new()) - }; - - services().rooms.state_compressor.save_state_from_diff( - current_sstatehash, - Arc::new(statediffnew), - Arc::new(statediffremoved), - 2, // every state change is 2 event changes on average - states_parents, - )?; - - /* - let mut tmp = services().rooms.load_shortstatehash_info(¤t_sstatehash)?; - let state = tmp.pop().unwrap(); - println!( - "{}\t{}{:?}: {:?} + {:?} - {:?}", - current_room, - " ".repeat(tmp.len()), - utils::u64_from_bytes(¤t_sstatehash).unwrap(), - tmp.last().map(|b| 
utils::u64_from_bytes(&b.0).unwrap()), - state - .2 - .iter() - .map(|b| utils::u64_from_bytes(&b[size_of::()..]).unwrap()) - .collect::>(), - state - .3 - .iter() - .map(|b| utils::u64_from_bytes(&b[size_of::()..]).unwrap()) - .collect::>() - ); - */ - - Ok::<_, Error>(()) - }; - - for (k, seventid) in db._db.open_tree("stateid_shorteventid")?.iter() { - let sstatehash = utils::u64_from_bytes(&k[0..size_of::()]) - .expect("number of bytes is correct"); - let sstatekey = k[size_of::()..].to_vec(); - if Some(sstatehash) != current_sstatehash { - if let Some(current_sstatehash) = current_sstatehash { - handle_state( - current_sstatehash, - current_room.as_deref().unwrap(), - current_state, - &mut last_roomstates, - )?; - last_roomstates - .insert(current_room.clone().unwrap(), current_sstatehash); - } - current_state = HashSet::new(); - current_sstatehash = Some(sstatehash); - - let event_id = db.shorteventid_eventid.get(&seventid).unwrap().unwrap(); - let string = utils::string_from_bytes(&event_id).unwrap(); - let event_id = <&EventId>::try_from(string.as_str()).unwrap(); - let pdu = services() - .rooms - .timeline - .get_pdu(event_id) - .unwrap() - .unwrap(); - - if Some(&pdu.room_id) != current_room.as_ref() { - current_room = Some(pdu.room_id.clone()); - } - } - - let mut val = sstatekey; - val.extend_from_slice(&seventid); - current_state.insert(val.try_into().expect("size is correct")); - } - - if let Some(current_sstatehash) = current_sstatehash { - handle_state( - current_sstatehash, - current_room.as_deref().unwrap(), - current_state, - &mut last_roomstates, - )?; - } - - services().globals.bump_database_version(7)?; - - warn!("Migration: 6 -> 7 finished"); - } - - if services().globals.database_version()? 
< 8 { - // Generate short room ids for all rooms - for (room_id, _) in db.roomid_shortstatehash.iter() { - let shortroomid = services().globals.next_count()?.to_be_bytes(); - db.roomid_shortroomid.insert(&room_id, &shortroomid)?; - info!("Migration: 8"); - } - // Update pduids db layout - let mut batch = db.pduid_pdu.iter().filter_map(|(key, v)| { - if !key.starts_with(b"!") { - return None; - } - let mut parts = key.splitn(2, |&b| b == 0xff); - let room_id = parts.next().unwrap(); - let count = parts.next().unwrap(); - - let short_room_id = db - .roomid_shortroomid - .get(room_id) - .unwrap() - .expect("shortroomid should exist"); - - let mut new_key = short_room_id; - new_key.extend_from_slice(count); - - Some((new_key, v)) - }); - - db.pduid_pdu.insert_batch(&mut batch)?; - - let mut batch2 = db.eventid_pduid.iter().filter_map(|(k, value)| { - if !value.starts_with(b"!") { - return None; - } - let mut parts = value.splitn(2, |&b| b == 0xff); - let room_id = parts.next().unwrap(); - let count = parts.next().unwrap(); - - let short_room_id = db - .roomid_shortroomid - .get(room_id) - .unwrap() - .expect("shortroomid should exist"); - - let mut new_value = short_room_id; - new_value.extend_from_slice(count); - - Some((k, new_value)) - }); - - db.eventid_pduid.insert_batch(&mut batch2)?; - - services().globals.bump_database_version(8)?; - - warn!("Migration: 7 -> 8 finished"); - } - - if services().globals.database_version()? 
< 9 { - // Update tokenids db layout - let mut iter = db - .tokenids - .iter() - .filter_map(|(key, _)| { - if !key.starts_with(b"!") { - return None; - } - let mut parts = key.splitn(4, |&b| b == 0xff); - let room_id = parts.next().unwrap(); - let word = parts.next().unwrap(); - let _pdu_id_room = parts.next().unwrap(); - let pdu_id_count = parts.next().unwrap(); - - let short_room_id = db - .roomid_shortroomid - .get(room_id) - .unwrap() - .expect("shortroomid should exist"); - let mut new_key = short_room_id; - new_key.extend_from_slice(word); - new_key.push(0xff); - new_key.extend_from_slice(pdu_id_count); - Some((new_key, Vec::new())) - }) - .peekable(); - - while iter.peek().is_some() { - db.tokenids.insert_batch(&mut iter.by_ref().take(1000))?; - debug!("Inserted smaller batch"); - } - - info!("Deleting starts"); - - let batch2: Vec<_> = db - .tokenids - .iter() - .filter_map(|(key, _)| { - if key.starts_with(b"!") { - Some(key) - } else { - None - } - }) - .collect(); - - for key in batch2 { - db.tokenids.remove(&key)?; - } - - services().globals.bump_database_version(9)?; - - warn!("Migration: 8 -> 9 finished"); - } - - if services().globals.database_version()? < 10 { - // Add other direction for shortstatekeys - for (statekey, shortstatekey) in db.statekey_shortstatekey.iter() { - db.shortstatekey_statekey - .insert(&shortstatekey, &statekey)?; - } - - // Force E2EE device list updates so we can send them over federation - for user_id in services().users.iter().filter_map(|r| r.ok()) { - services().users.mark_device_key_update(&user_id)?; - } - - services().globals.bump_database_version(10)?; - - warn!("Migration: 9 -> 10 finished"); - } - - if services().globals.database_version()? < 11 { - db._db - .open_tree("userdevicesessionid_uiaarequest")? - .clear()?; - services().globals.bump_database_version(11)?; - - warn!("Migration: 10 -> 11 finished"); - } - - if services().globals.database_version()? 
< 12 { - for username in services().users.list_local_users()? { - let user = match UserId::parse_with_server_name( - username.clone(), - services().globals.server_name(), - ) { - Ok(u) => u, - Err(e) => { - warn!("Invalid username {username}: {e}"); - continue; - } - }; - - let raw_rules_list = services() - .account_data - .get( - None, - &user, - GlobalAccountDataEventType::PushRules.to_string().into(), - ) - .unwrap() - .expect("Username is invalid"); - - let mut account_data = - serde_json::from_str::(raw_rules_list.get()).unwrap(); - let rules_list = &mut account_data.content.global; - - //content rule - { - let content_rule_transformation = - [".m.rules.contains_user_name", ".m.rule.contains_user_name"]; - - let rule = rules_list.content.get(content_rule_transformation[0]); - if rule.is_some() { - let mut rule = rule.unwrap().clone(); - rule.rule_id = content_rule_transformation[1].to_owned(); - rules_list - .content - .shift_remove(content_rule_transformation[0]); - rules_list.content.insert(rule); - } - } - - //underride rules - { - let underride_rule_transformation = [ - [".m.rules.call", ".m.rule.call"], - [".m.rules.room_one_to_one", ".m.rule.room_one_to_one"], - [ - ".m.rules.encrypted_room_one_to_one", - ".m.rule.encrypted_room_one_to_one", - ], - [".m.rules.message", ".m.rule.message"], - [".m.rules.encrypted", ".m.rule.encrypted"], - ]; - - for transformation in underride_rule_transformation { - let rule = rules_list.underride.get(transformation[0]); - if let Some(rule) = rule { - let mut rule = rule.clone(); - rule.rule_id = transformation[1].to_owned(); - rules_list.underride.shift_remove(transformation[0]); - rules_list.underride.insert(rule); - } - } - } - - services().account_data.update( - None, - &user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(account_data).expect("to json value always works"), - )?; - } - - services().globals.bump_database_version(12)?; - - warn!("Migration: 11 -> 12 finished"); - } - 
- // This migration can be reused as-is anytime the server-default rules are updated. - if services().globals.database_version()? < 13 { - for username in services().users.list_local_users()? { - let user = match UserId::parse_with_server_name( - username.clone(), - services().globals.server_name(), - ) { - Ok(u) => u, - Err(e) => { - warn!("Invalid username {username}: {e}"); - continue; - } - }; - - let raw_rules_list = services() - .account_data - .get( - None, - &user, - GlobalAccountDataEventType::PushRules.to_string().into(), - ) - .unwrap() - .expect("Username is invalid"); - - let mut account_data = - serde_json::from_str::(raw_rules_list.get()).unwrap(); - - let user_default_rules = ruma::push::Ruleset::server_default(&user); - account_data - .content - .global - .update_with_server_default(user_default_rules); - - services().account_data.update( - None, - &user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(account_data).expect("to json value always works"), - )?; - } - - services().globals.bump_database_version(13)?; - - warn!("Migration: 12 -> 13 finished"); - } - - assert_eq!( - services().globals.database_version().unwrap(), - latest_database_version - ); - - info!( - "Loaded {} database with version {}", - services().globals.config.database_backend, - latest_database_version - ); - } else { - services() - .globals - .bump_database_version(latest_database_version)?; - - // Create the admin room and server user on first run - services().admin.create_admin_room().await?; - - warn!( - "Created new {} database with version {}", - services().globals.config.database_backend, - latest_database_version - ); - } - - // This data is probably outdated - db.presenceid_presence.clear()?; - - services().admin.start_handler(); - - // Set emergency access for the conduit user - match set_emergency_access() { - Ok(pwd_set) => { - if pwd_set { - warn!("The Conduit account emergency password is set! 
Please unset it as soon as you finish admin account recovery!"); - services().admin.send_message(RoomMessageEventContent::text_plain("The Conduit account emergency password is set! Please unset it as soon as you finish admin account recovery!")); - } - } - Err(e) => { - error!( - "Could not set the configured emergency password for the conduit user: {}", - e - ) - } - }; - - services().sending.start_handler(); - - Self::start_cleanup_task().await; - if services().globals.allow_check_for_updates() { - Self::start_check_for_updates_task(); - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn flush(&self) -> Result<()> { - let start = std::time::Instant::now(); - - let res = self._db.flush(); - - debug!("flush: took {:?}", start.elapsed()); - - res - } - - #[tracing::instrument] - pub fn start_check_for_updates_task() { - tokio::spawn(async move { - let timer_interval = Duration::from_secs(60 * 60); - let mut i = interval(timer_interval); - loop { - i.tick().await; - let _ = Self::try_handle_updates().await; - } - }); - } - - async fn try_handle_updates() -> Result<()> { - let response = services() - .globals - .default_client() - .get("https://conduit.rs/check-for-updates/stable") - .send() - .await?; - - #[derive(Deserialize)] - struct CheckForUpdatesResponseEntry { - id: u64, - date: String, - message: String, - } - #[derive(Deserialize)] - struct CheckForUpdatesResponse { - updates: Vec, - } - - let response = serde_json::from_str::(&response.text().await?) - .map_err(|_| Error::BadServerResponse("Bad version check response"))?; - - let mut last_update_id = services().globals.last_check_for_updates_id()?; - for update in response.updates { - last_update_id = last_update_id.max(update.id); - if update.id > services().globals.last_check_for_updates_id()? { - println!("{}", update.message); - services() - .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "@room: The following is a message from the Conduit developers. 
It was sent on '{}':\n\n{}", - update.date, update.message - ))) - } - } - services() - .globals - .update_check_for_updates_id(last_update_id)?; - - Ok(()) - } - - #[tracing::instrument] - pub async fn start_cleanup_task() { - #[cfg(unix)] - use tokio::signal::unix::{signal, SignalKind}; - - use std::time::{Duration, Instant}; - - let timer_interval = - Duration::from_secs(services().globals.config.cleanup_second_interval as u64); - - tokio::spawn(async move { - let mut i = interval(timer_interval); - #[cfg(unix)] - let mut s = signal(SignalKind::hangup()).unwrap(); - - loop { - #[cfg(unix)] - tokio::select! { - _ = i.tick() => { - debug!("cleanup: Timer ticked"); - } - _ = s.recv() => { - debug!("cleanup: Received SIGHUP"); - } - }; - #[cfg(not(unix))] - { - i.tick().await; - debug!("cleanup: Timer ticked") - } - - let start = Instant::now(); - if let Err(e) = services().globals.cleanup() { - error!("cleanup: Errored: {}", e); - } else { - debug!("cleanup: Finished in {:?}", start.elapsed()); - } - } - }); - } -} - -/// Sets the emergency password and push rules for the @conduit account in case emergency password is set -fn set_emergency_access() -> Result { - let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name()) - .expect("@conduit:server_name is a valid UserId"); - - services().users.set_password( - &conduit_user, - services().globals.emergency_password().as_deref(), - )?; - - let (ruleset, res) = match services().globals.emergency_password() { - Some(_) => (Ruleset::server_default(&conduit_user), Ok(true)), - None => (Ruleset::new(), Ok(false)), - }; - - services().account_data.update( - None, - &conduit_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(&GlobalAccountDataEvent { - content: PushRulesEventContent { global: ruleset }, - }) - .expect("to json value always works"), - )?; - - res -} diff --git a/src/error.rs b/src/error.rs new file mode 100644 index 00000000..49cf7307 
--- /dev/null +++ b/src/error.rs @@ -0,0 +1,191 @@ +//! Error handling facilities + +use std::{fmt, iter, path::PathBuf}; + +use thiserror::Error; + +use crate::config::ListenConfig; + +/// Formats an [`Error`][0] and its [`source`][1]s with a separator +/// +/// [0]: std::error::Error +/// [1]: std::error::Error::source +pub(crate) struct DisplayWithSources<'a> { + /// The error (and its sources) to write + pub(crate) error: &'a dyn std::error::Error, + + /// Separator to write between the original error and subsequent sources + pub(crate) infix: &'static str, +} + +impl fmt::Display for DisplayWithSources<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.error)?; + + let mut source = self.error.source(); + + source + .into_iter() + .chain(iter::from_fn(|| { + source = source.and_then(std::error::Error::source); + source + })) + .try_for_each(|source| write!(f, "{}{source}", self.infix)) + } +} + +/// Top-level errors +// Missing docs are allowed here since that kind of information should be +// encoded in the error messages themselves anyway. +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub(crate) enum Main { + #[error(transparent)] + ServeCommand(#[from] ServeCommand), + + #[error("failed to install global default tracing subscriber")] + SetSubscriber(#[from] tracing::subscriber::SetGlobalDefaultError), + + #[error(transparent)] + CheckConfigCommand(#[from] CheckConfigCommand), +} + +/// Errors returned from the `serve` CLI subcommand. +// Missing docs are allowed here since that kind of information should be +// encoded in the error messages themselves anyway. 
+#[allow(missing_docs)] +#[derive(Error, Debug)] +pub(crate) enum ServeCommand { + #[error("failed to load configuration")] + Config(#[from] Config), + + #[error("failed to initialize observability")] + Observability(#[from] Observability), + + #[error("failed to load or create the database")] + DatabaseError(#[source] crate::utils::error::Error), + + #[error("failed to serve requests")] + Serve(#[from] Serve), + + #[error("failed to initialize services")] + InitializeServices(#[source] crate::utils::error::Error), + + #[error("`server_name` change check failed")] + ServerNameChanged(#[from] ServerNameChanged), +} + +/// Errors returned from the `check-config` CLI subcommand. +// Missing docs are allowed here since that kind of information should be +// encoded in the error messages themselves anyway. +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub(crate) enum CheckConfigCommand { + #[error("failed to validate configuration")] + Config(#[from] Config), +} + +/// Error generated if `server_name` has changed or if checking this failed +// Missing docs are allowed here since that kind of information should be +// encoded in the error messages themselves anyway. +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub(crate) enum ServerNameChanged { + #[error("failed to check if there are any users")] + NonZeroUsers(#[source] crate::utils::error::Error), + + #[error("failed to check if the admin bot exists")] + AdminBotExists(#[source] crate::utils::error::Error), + + #[error("`server_name` in the database and config file differ")] + Renamed, +} + +/// Observability initialization errors +// Missing docs are allowed here since that kind of information should be +// encoded in the error messages themselves anyway. 
+#[allow(missing_docs)] +#[derive(Error, Debug)] +pub(crate) enum Observability { + // Upstream's documentation on what this error means is very sparse + #[error("opentelemetry error")] + Otel(#[from] opentelemetry::trace::TraceError), + + #[error("failed to install global default tracing subscriber")] + SetSubscriber(#[from] tracing::subscriber::SetGlobalDefaultError), + + // Upstream's documentation on what this error means is very sparse + #[error("tracing_flame error")] + TracingFlame(#[from] tracing_flame::Error), +} + +/// Configuration errors +// Missing docs are allowed here since that kind of information should be +// encoded in the error messages themselves anyway. +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub(crate) enum Config { + #[error("failed to find configuration file")] + Search(#[from] ConfigSearch), + + #[error("failed to read configuration file {1:?}")] + Read(#[source] std::io::Error, PathBuf), + + #[error("failed to parse configuration file {1:?}")] + Parse(#[source] toml::de::Error, PathBuf), + + #[error("failed to canonicalize path {}", .1.display())] + Canonicalize(#[source] std::io::Error, PathBuf), + + #[error("registration token must not be empty")] + RegistrationTokenEmpty, + + #[error("database and media paths overlap")] + DatabaseMediaOverlap, +} + +/// Errors that can occur while searching for a config file +// Missing docs are allowed here since that kind of information should be +// encoded in the error messages themselves anyway. +#[allow(missing_docs)] +#[derive(Error, Debug)] +pub(crate) enum ConfigSearch { + #[error("XDG Base Directory error")] + Xdg(#[from] xdg::BaseDirectoriesError), + + #[error("no relevant configuration files found in XDG Base Directories")] + NotFound, +} + +/// Errors serving traffic +// Missing docs are allowed here since that kind of information should be +// encoded in the error messages themselves anyway. 
+#[allow(missing_docs)] +#[derive(Error, Debug)] +pub(crate) enum Serve { + #[error("no listeners were specified in the configuration file")] + NoListeners, + + #[error( + "listener {0} requested TLS, but no TLS cert was specified in the \ + configuration file. Please set 'tls.certs' and 'tls.key'" + )] + NoTlsCerts(ListenConfig), + + #[error("failed to read TLS cert and key files at {certs:?} and {key:?}")] + LoadCerts { + certs: String, + key: String, + #[source] + err: std::io::Error, + }, + + #[error("failed to run request listener on {1}")] + Listen(#[source] std::io::Error, ListenConfig), + + #[error( + "federation self-test failed (set `federation.self_test = false` in \ + config to disable)" + )] + FederationSelfTestFailed(#[source] crate::Error), +} diff --git a/src/lib.rs b/src/lib.rs deleted file mode 100644 index 5a89f805..00000000 --- a/src/lib.rs +++ /dev/null @@ -1,26 +0,0 @@ -pub mod api; -pub mod clap; -mod config; -mod database; -mod service; -mod utils; - -// Not async due to services() being used in many closures, and async closures are not stable as of writing -// This is the case for every other occurence of sync Mutex/RwLock, except for database related ones, where -// the current maintainer (Timo) has asked to not modify those -use std::sync::RwLock; - -pub use api::ruma_wrapper::{Ruma, RumaResponse}; -pub use config::Config; -pub use database::KeyValueDatabase; -pub use service::{pdu::PduEvent, Services}; -pub use utils::error::{Error, Result}; - -pub static SERVICES: RwLock> = RwLock::new(None); - -pub fn services() -> &'static Services { - SERVICES - .read() - .unwrap() - .expect("SERVICES should be initialized when this is called") -} diff --git a/src/main.rs b/src/main.rs index 7beeb8ba..17ddfb2f 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,120 +1,83 @@ -use std::{future::Future, io, net::SocketAddr, sync::atomic, time::Duration}; +// Avoid spurious warnings with --no-default-features, which isn't expected to +// work anyway 
+#![cfg_attr(not(any(feature = "sqlite", feature = "rocksdb")), allow(unused))] -use axum::{ - extract::{DefaultBodyLimit, FromRequestParts, MatchedPath}, - response::IntoResponse, - routing::{any, get, on, MethodFilter}, - Router, -}; -use axum_server::{bind, bind_rustls, tls_rustls::RustlsConfig, Handle as ServerHandle}; -use conduit::api::{client_server, server_server}; -use figment::{ - providers::{Env, Format, Toml}, - Figment, -}; -use http::{ - header::{self, HeaderName}, - Method, StatusCode, Uri, -}; -use ruma::api::{ - client::{ - error::{Error as RumaError, ErrorBody, ErrorKind}, - uiaa::UiaaResponse, - }, - IncomingRequest, -}; -use tokio::signal; -use tower::ServiceBuilder; -use tower_http::{ - cors::{self, CorsLayer}, - trace::TraceLayer, - ServiceBuilderExt as _, -}; -use tracing::{debug, error, info, warn}; -use tracing_subscriber::{prelude::*, EnvFilter}; +use std::process::ExitCode; -pub use conduit::*; // Re-export everything from the library crate +use clap::Parser; +use tracing::{error, info}; -#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] -use tikv_jemallocator::Jemalloc; +mod api; +mod cli; +mod config; +mod database; +mod error; +mod observability; +mod service; +mod utils; -#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] -#[global_allocator] -static GLOBAL: Jemalloc = Jemalloc; +pub(crate) use api::ruma_wrapper::{Ar, Ra}; +pub(crate) use config::Config; +pub(crate) use service::{pdu::PduEvent, services, Services}; +pub(crate) use utils::error::{Error, Result}; + +/// Returns the current version of the crate with extra info if supplied +/// +/// Set the environment variable `GRAPEVINE_VERSION_EXTRA` to any UTF-8 string +/// to include it in parenthesis after the SemVer version. A common value are +/// git commit hashes. 
+fn version() -> String { + let cargo_pkg_version = env!("CARGO_PKG_VERSION"); + + match option_env!("GRAPEVINE_VERSION_EXTRA") { + Some(x) => format!("{cargo_pkg_version} ({x})"), + None => cargo_pkg_version.to_owned(), + } +} + +#[derive(Debug, Clone, Copy)] +enum ApplicationState { + Ready, + Reloading, + Stopping, +} + +fn set_application_state(state: ApplicationState) { + info!(?state, "Application state changed"); + + #[cfg(feature = "systemd")] + { + use sd_notify::NotifyState; + + fn notify(states: &[NotifyState<'_>]) { + sd_notify::notify(false, states) + .expect("should be able to notify systemd"); + } + + match state { + ApplicationState::Ready => notify(&[NotifyState::Ready]), + ApplicationState::Reloading => { + let timespec = nix::time::clock_gettime( + nix::time::ClockId::CLOCK_MONOTONIC, + ) + .expect("CLOCK_MONOTONIC should be usable"); + let monotonic_usec = + timespec.tv_sec() * 1_000_000 + timespec.tv_nsec() / 1000; + + notify(&[ + NotifyState::Reloading, + NotifyState::Custom(&format!( + "MONOTONIC_USEC={monotonic_usec}", + )), + ]); + } + ApplicationState::Stopping => notify(&[NotifyState::Stopping]), + } + } +} #[tokio::main] -async fn main() { - clap::parse(); - - // Initialize config - let raw_config = - Figment::new() - .merge( - Toml::file(Env::var("CONDUIT_CONFIG").expect( - "The CONDUIT_CONFIG env var needs to be set. Example: /etc/conduit.toml", - )) - .nested(), - ) - .merge(Env::prefixed("CONDUIT_").global()); - - let config = match raw_config.extract::() { - Ok(s) => s, - Err(e) => { - eprintln!("It looks like your config is invalid. 
The following error occurred: {e}"); - std::process::exit(1); - } - }; - - config.warn_deprecated(); - - if config.allow_jaeger { - opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new()); - let tracer = opentelemetry_jaeger::new_agent_pipeline() - .with_auto_split_batch(true) - .with_service_name("conduit") - .install_batch(opentelemetry::runtime::Tokio) - .unwrap(); - let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); - - let filter_layer = match EnvFilter::try_new(&config.log) { - Ok(s) => s, - Err(e) => { - eprintln!( - "It looks like your log config is invalid. The following error occurred: {e}" - ); - EnvFilter::try_new("warn").unwrap() - } - }; - - let subscriber = tracing_subscriber::Registry::default() - .with(filter_layer) - .with(telemetry); - tracing::subscriber::set_global_default(subscriber).unwrap(); - } else if config.tracing_flame { - let registry = tracing_subscriber::Registry::default(); - let (flame_layer, _guard) = - tracing_flame::FlameLayer::with_file("./tracing.folded").unwrap(); - let flame_layer = flame_layer.with_empty_samples(false); - - let filter_layer = EnvFilter::new("trace,h2=off"); - - let subscriber = registry.with(filter_layer).with(flame_layer); - tracing::subscriber::set_global_default(subscriber).unwrap(); - } else { - let registry = tracing_subscriber::Registry::default(); - let fmt_layer = tracing_subscriber::fmt::Layer::new(); - let filter_layer = match EnvFilter::try_new(&config.log) { - Ok(s) => s, - Err(e) => { - eprintln!("It looks like your config is invalid. 
The following error occured while parsing it: {e}"); - EnvFilter::try_new("warn").unwrap() - } - }; - - let subscriber = registry.with(filter_layer).with(fmt_layer); - tracing::subscriber::set_global_default(subscriber).unwrap(); - } - +async fn main() -> ExitCode { // This is needed for opening lots of file descriptors, which tends to // happen more often when using RocksDB and making lots of federation // connections at startup. The soft limit is usually 1024, and the hard @@ -123,467 +86,40 @@ async fn main() { // * https://www.freedesktop.org/software/systemd/man/systemd.exec.html#id-1.12.2.1.17.6 // * https://github.com/systemd/systemd/commit/0abf94923b4a95a7d89bc526efc84e7ca2b71741 #[cfg(unix)] - maximize_fd_limit().expect("should be able to increase the soft limit to the hard limit"); + maximize_fd_limit() + .expect("should be able to increase the soft limit to the hard limit"); - info!("Loading database"); - if let Err(error) = KeyValueDatabase::load_or_create(config).await { - error!(?error, "The database couldn't be loaded or created"); - - std::process::exit(1); + let args = cli::Args::parse(); + let Err(e) = args.run().await else { + return ExitCode::SUCCESS; }; - let config = &services().globals.config; - info!("Starting server"); - run_server().await.unwrap(); - - if config.allow_jaeger { - opentelemetry::global::shutdown_tracer_provider(); - } -} - -async fn run_server() -> io::Result<()> { - let config = &services().globals.config; - let addr = SocketAddr::from((config.address, config.port)); - - let x_requested_with = HeaderName::from_static("x-requested-with"); - - let middlewares = ServiceBuilder::new() - .sensitive_headers([header::AUTHORIZATION]) - .layer(axum::middleware::from_fn(spawn_task)) - .layer( - TraceLayer::new_for_http().make_span_with(|request: &http::Request<_>| { - let path = if let Some(path) = request.extensions().get::() { - path.as_str() - } else { - request.uri().path() - }; - - tracing::info_span!("http_request", %path) - 
}), - ) - .layer(axum::middleware::from_fn(unrecognized_method)) - .layer( - CorsLayer::new() - .allow_origin(cors::Any) - .allow_methods([ - Method::GET, - Method::POST, - Method::PUT, - Method::DELETE, - Method::OPTIONS, - ]) - .allow_headers([ - header::ORIGIN, - x_requested_with, - header::CONTENT_TYPE, - header::ACCEPT, - header::AUTHORIZATION, - ]) - .max_age(Duration::from_secs(86400)), - ) - .layer(DefaultBodyLimit::max( - config - .max_request_size - .try_into() - .expect("failed to convert max request size"), - )); - - let app = routes(config).layer(middlewares).into_make_service(); - let handle = ServerHandle::new(); - - tokio::spawn(shutdown_signal(handle.clone())); - - match &config.tls { - Some(tls) => { - let conf = RustlsConfig::from_pem_file(&tls.certs, &tls.key).await?; - let server = bind_rustls(addr, conf).handle(handle).serve(app); - - #[cfg(feature = "systemd")] - let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready]); - - server.await? + eprintln!( + "Error: {}", + error::DisplayWithSources { + error: &e, + infix: "\n Caused by: " } - None => { - let server = bind(addr).handle(handle).serve(app); + ); - #[cfg(feature = "systemd")] - let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready]); - - server.await? 
- } - } - - Ok(()) -} - -async fn spawn_task( - req: axum::http::Request, - next: axum::middleware::Next, -) -> std::result::Result { - if services().globals.shutdown.load(atomic::Ordering::Relaxed) { - return Err(StatusCode::SERVICE_UNAVAILABLE); - } - tokio::spawn(next.run(req)) - .await - .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR) -} - -async fn unrecognized_method( - req: axum::http::Request, - next: axum::middleware::Next, -) -> std::result::Result { - let method = req.method().clone(); - let uri = req.uri().clone(); - let inner = next.run(req).await; - if inner.status() == axum::http::StatusCode::METHOD_NOT_ALLOWED { - warn!("Method not allowed: {method} {uri}"); - return Ok(RumaResponse(UiaaResponse::MatrixError(RumaError { - body: ErrorBody::Standard { - kind: ErrorKind::Unrecognized, - message: "M_UNRECOGNIZED: Unrecognized request".to_owned(), - }, - status_code: StatusCode::METHOD_NOT_ALLOWED, - })) - .into_response()); - } - Ok(inner) -} - -fn routes(config: &Config) -> Router { - let router = Router::new() - .ruma_route(client_server::get_supported_versions_route) - .ruma_route(client_server::get_register_available_route) - .ruma_route(client_server::register_route) - .ruma_route(client_server::get_login_types_route) - .ruma_route(client_server::login_route) - .ruma_route(client_server::whoami_route) - .ruma_route(client_server::logout_route) - .ruma_route(client_server::logout_all_route) - .ruma_route(client_server::change_password_route) - .ruma_route(client_server::deactivate_route) - .ruma_route(client_server::third_party_route) - .ruma_route(client_server::request_3pid_management_token_via_email_route) - .ruma_route(client_server::request_3pid_management_token_via_msisdn_route) - .ruma_route(client_server::get_capabilities_route) - .ruma_route(client_server::get_pushrules_all_route) - .ruma_route(client_server::set_pushrule_route) - .ruma_route(client_server::get_pushrule_route) - .ruma_route(client_server::set_pushrule_enabled_route) - 
.ruma_route(client_server::get_pushrule_enabled_route) - .ruma_route(client_server::get_pushrule_actions_route) - .ruma_route(client_server::set_pushrule_actions_route) - .ruma_route(client_server::delete_pushrule_route) - .ruma_route(client_server::get_room_event_route) - .ruma_route(client_server::get_room_aliases_route) - .ruma_route(client_server::get_filter_route) - .ruma_route(client_server::create_filter_route) - .ruma_route(client_server::set_global_account_data_route) - .ruma_route(client_server::set_room_account_data_route) - .ruma_route(client_server::get_global_account_data_route) - .ruma_route(client_server::get_room_account_data_route) - .ruma_route(client_server::set_displayname_route) - .ruma_route(client_server::get_displayname_route) - .ruma_route(client_server::set_avatar_url_route) - .ruma_route(client_server::get_avatar_url_route) - .ruma_route(client_server::get_profile_route) - .ruma_route(client_server::set_presence_route) - .ruma_route(client_server::get_presence_route) - .ruma_route(client_server::upload_keys_route) - .ruma_route(client_server::get_keys_route) - .ruma_route(client_server::claim_keys_route) - .ruma_route(client_server::create_backup_version_route) - .ruma_route(client_server::update_backup_version_route) - .ruma_route(client_server::delete_backup_version_route) - .ruma_route(client_server::get_latest_backup_info_route) - .ruma_route(client_server::get_backup_info_route) - .ruma_route(client_server::add_backup_keys_route) - .ruma_route(client_server::add_backup_keys_for_room_route) - .ruma_route(client_server::add_backup_keys_for_session_route) - .ruma_route(client_server::delete_backup_keys_for_room_route) - .ruma_route(client_server::delete_backup_keys_for_session_route) - .ruma_route(client_server::delete_backup_keys_route) - .ruma_route(client_server::get_backup_keys_for_room_route) - .ruma_route(client_server::get_backup_keys_for_session_route) - .ruma_route(client_server::get_backup_keys_route) - 
.ruma_route(client_server::set_read_marker_route) - .ruma_route(client_server::create_receipt_route) - .ruma_route(client_server::create_typing_event_route) - .ruma_route(client_server::create_room_route) - .ruma_route(client_server::redact_event_route) - .ruma_route(client_server::report_event_route) - .ruma_route(client_server::create_alias_route) - .ruma_route(client_server::delete_alias_route) - .ruma_route(client_server::get_alias_route) - .ruma_route(client_server::join_room_by_id_route) - .ruma_route(client_server::join_room_by_id_or_alias_route) - .ruma_route(client_server::joined_members_route) - .ruma_route(client_server::leave_room_route) - .ruma_route(client_server::forget_room_route) - .ruma_route(client_server::joined_rooms_route) - .ruma_route(client_server::kick_user_route) - .ruma_route(client_server::ban_user_route) - .ruma_route(client_server::unban_user_route) - .ruma_route(client_server::invite_user_route) - .ruma_route(client_server::set_room_visibility_route) - .ruma_route(client_server::get_room_visibility_route) - .ruma_route(client_server::get_public_rooms_route) - .ruma_route(client_server::get_public_rooms_filtered_route) - .ruma_route(client_server::search_users_route) - .ruma_route(client_server::get_member_events_route) - .ruma_route(client_server::get_protocols_route) - .ruma_route(client_server::send_message_event_route) - .ruma_route(client_server::send_state_event_for_key_route) - .ruma_route(client_server::get_state_events_route) - .ruma_route(client_server::get_state_events_for_key_route) - // Ruma doesn't have support for multiple paths for a single endpoint yet, and these routes - // share one Ruma request / response type pair with {get,send}_state_event_for_key_route - .route( - "/_matrix/client/r0/rooms/:room_id/state/:event_type", - get(client_server::get_state_events_for_empty_key_route) - .put(client_server::send_state_event_for_empty_key_route), - ) - .route( - "/_matrix/client/v3/rooms/:room_id/state/:event_type", - 
get(client_server::get_state_events_for_empty_key_route) - .put(client_server::send_state_event_for_empty_key_route), - ) - // These two endpoints allow trailing slashes - .route( - "/_matrix/client/r0/rooms/:room_id/state/:event_type/", - get(client_server::get_state_events_for_empty_key_route) - .put(client_server::send_state_event_for_empty_key_route), - ) - .route( - "/_matrix/client/v3/rooms/:room_id/state/:event_type/", - get(client_server::get_state_events_for_empty_key_route) - .put(client_server::send_state_event_for_empty_key_route), - ) - .ruma_route(client_server::sync_events_route) - .ruma_route(client_server::sync_events_v4_route) - .ruma_route(client_server::get_context_route) - .ruma_route(client_server::get_message_events_route) - .ruma_route(client_server::search_events_route) - .ruma_route(client_server::turn_server_route) - .ruma_route(client_server::send_event_to_device_route) - .ruma_route(client_server::get_media_config_route) - .ruma_route(client_server::create_content_route) - .ruma_route(client_server::get_content_route) - .ruma_route(client_server::get_content_as_filename_route) - .ruma_route(client_server::get_content_thumbnail_route) - .ruma_route(client_server::get_devices_route) - .ruma_route(client_server::get_device_route) - .ruma_route(client_server::update_device_route) - .ruma_route(client_server::delete_device_route) - .ruma_route(client_server::delete_devices_route) - .ruma_route(client_server::get_tags_route) - .ruma_route(client_server::update_tag_route) - .ruma_route(client_server::delete_tag_route) - .ruma_route(client_server::upload_signing_keys_route) - .ruma_route(client_server::upload_signatures_route) - .ruma_route(client_server::get_key_changes_route) - .ruma_route(client_server::get_pushers_route) - .ruma_route(client_server::set_pushers_route) - // .ruma_route(client_server::third_party_route) - .ruma_route(client_server::upgrade_room_route) - .ruma_route(client_server::get_threads_route) - 
.ruma_route(client_server::get_relating_events_with_rel_type_and_event_type_route) - .ruma_route(client_server::get_relating_events_with_rel_type_route) - .ruma_route(client_server::get_relating_events_route) - .ruma_route(client_server::get_hierarchy_route) - .route( - "/_matrix/client/r0/rooms/:room_id/initialSync", - get(initial_sync), - ) - .route( - "/_matrix/client/v3/rooms/:room_id/initialSync", - get(initial_sync), - ) - .route("/", get(it_works)) - .fallback(not_found); - - if config.allow_federation { - router - .ruma_route(server_server::get_server_version_route) - .route( - "/_matrix/key/v2/server", - get(server_server::get_server_keys_route), - ) - .route( - "/_matrix/key/v2/server/:key_id", - get(server_server::get_server_keys_deprecated_route), - ) - .ruma_route(server_server::get_public_rooms_route) - .ruma_route(server_server::get_public_rooms_filtered_route) - .ruma_route(server_server::send_transaction_message_route) - .ruma_route(server_server::get_event_route) - .ruma_route(server_server::get_backfill_route) - .ruma_route(server_server::get_missing_events_route) - .ruma_route(server_server::get_event_authorization_route) - .ruma_route(server_server::get_room_state_route) - .ruma_route(server_server::get_room_state_ids_route) - .ruma_route(server_server::create_join_event_template_route) - .ruma_route(server_server::create_join_event_v1_route) - .ruma_route(server_server::create_join_event_v2_route) - .ruma_route(server_server::create_invite_route) - .ruma_route(server_server::get_devices_route) - .ruma_route(server_server::get_room_information_route) - .ruma_route(server_server::get_profile_information_route) - .ruma_route(server_server::get_keys_route) - .ruma_route(server_server::claim_keys_route) - } else { - router - .route("/_matrix/federation/*path", any(federation_disabled)) - .route("/_matrix/key/*path", any(federation_disabled)) - } -} - -async fn shutdown_signal(handle: ServerHandle) { - let ctrl_c = async { - signal::ctrl_c() - 
.await - .expect("failed to install Ctrl+C handler"); - }; - - #[cfg(unix)] - let terminate = async { - signal::unix::signal(signal::unix::SignalKind::terminate()) - .expect("failed to install signal handler") - .recv() - .await; - }; - - #[cfg(not(unix))] - let terminate = std::future::pending::<()>(); - - let sig: &str; - - tokio::select! { - _ = ctrl_c => { sig = "Ctrl+C"; }, - _ = terminate => { sig = "SIGTERM"; }, - } - - warn!("Received {}, shutting down...", sig); - handle.graceful_shutdown(Some(Duration::from_secs(30))); - - services().globals.shutdown(); - - #[cfg(feature = "systemd")] - let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]); -} - -async fn federation_disabled(_: Uri) -> impl IntoResponse { - Error::bad_config("Federation is disabled.") -} - -async fn not_found(uri: Uri) -> impl IntoResponse { - warn!("Not found: {uri}"); - Error::BadRequest(ErrorKind::Unrecognized, "Unrecognized request") -} - -async fn initial_sync(_uri: Uri) -> impl IntoResponse { - Error::BadRequest( - ErrorKind::GuestAccessForbidden, - "Guest access not implemented", - ) -} - -async fn it_works() -> &'static str { - "Hello from Conduit!" -} - -trait RouterExt { - fn ruma_route(self, handler: H) -> Self - where - H: RumaHandler, - T: 'static; -} - -impl RouterExt for Router { - fn ruma_route(self, handler: H) -> Self - where - H: RumaHandler, - T: 'static, - { - handler.add_to_router(self) - } -} - -pub trait RumaHandler { - // Can't transform to a handler without boxing or relying on the nightly-only - // impl-trait-in-traits feature. Moving a small amount of extra logic into the trait - // allows bypassing both. - fn add_to_router(self, router: Router) -> Router; -} - -macro_rules! impl_ruma_handler { - ( $($ty:ident),* $(,)? 
) => { - #[axum::async_trait] - #[allow(non_snake_case)] - impl RumaHandler<($($ty,)* Ruma,)> for F - where - Req: IncomingRequest + Send + 'static, - F: FnOnce($($ty,)* Ruma) -> Fut + Clone + Send + 'static, - Fut: Future> - + Send, - E: IntoResponse, - $( $ty: FromRequestParts<()> + Send + 'static, )* - { - fn add_to_router(self, mut router: Router) -> Router { - let meta = Req::METADATA; - let method_filter = method_to_filter(meta.method); - - for path in meta.history.all_paths() { - let handler = self.clone(); - - router = router.route(path, on(method_filter, |$( $ty: $ty, )* req| async move { - handler($($ty,)* req).await.map(RumaResponse) - })) - } - - router - } - } - }; -} - -impl_ruma_handler!(); -impl_ruma_handler!(T1); -impl_ruma_handler!(T1, T2); -impl_ruma_handler!(T1, T2, T3); -impl_ruma_handler!(T1, T2, T3, T4); -impl_ruma_handler!(T1, T2, T3, T4, T5); -impl_ruma_handler!(T1, T2, T3, T4, T5, T6); -impl_ruma_handler!(T1, T2, T3, T4, T5, T6, T7); -impl_ruma_handler!(T1, T2, T3, T4, T5, T6, T7, T8); - -fn method_to_filter(method: Method) -> MethodFilter { - match method { - Method::DELETE => MethodFilter::DELETE, - Method::GET => MethodFilter::GET, - Method::HEAD => MethodFilter::HEAD, - Method::OPTIONS => MethodFilter::OPTIONS, - Method::PATCH => MethodFilter::PATCH, - Method::POST => MethodFilter::POST, - Method::PUT => MethodFilter::PUT, - Method::TRACE => MethodFilter::TRACE, - m => panic!("Unsupported HTTP method: {m:?}"), - } + ExitCode::FAILURE } #[cfg(unix)] #[tracing::instrument(err)] fn maximize_fd_limit() -> Result<(), nix::errno::Errno> { use nix::sys::resource::{getrlimit, setrlimit, Resource}; + use tracing::debug; let res = Resource::RLIMIT_NOFILE; let (soft_limit, hard_limit) = getrlimit(res)?; - debug!("Current nofile soft limit: {soft_limit}"); + debug!(soft_limit, "Current nofile soft limit"); setrlimit(res, hard_limit, hard_limit)?; - debug!("Increased nofile soft limit to {hard_limit}"); + debug!(hard_limit, "Increased nofile soft 
limit to the hard limit"); Ok(()) } diff --git a/src/observability.rs b/src/observability.rs new file mode 100644 index 00000000..0f136306 --- /dev/null +++ b/src/observability.rs @@ -0,0 +1,559 @@ +//! Facilities for observing runtime behavior +#![warn(missing_docs, clippy::missing_docs_in_private_items)] + +use std::{ + collections::HashSet, + fs::File, + io::BufWriter, + sync::{Arc, LazyLock}, +}; + +use axum::{ + extract::{MatchedPath, Request}, + middleware::Next, + response::Response, +}; +use http::Method; +use opentelemetry::{metrics::MeterProvider, trace::TracerProvider, KeyValue}; +use opentelemetry_otlp::WithExportConfig; +use opentelemetry_sdk::{ + metrics::{new_view, Aggregation, Instrument, SdkMeterProvider, Stream}, + Resource, +}; +use strum::{AsRefStr, IntoStaticStr}; +use thiserror::Error; +use tokio::time::Instant; +use tracing::{subscriber::SetGlobalDefaultError, Span}; +use tracing_flame::{FlameLayer, FlushGuard}; +use tracing_opentelemetry::OtelData; +use tracing_subscriber::{ + layer::SubscriberExt, + registry::{LookupSpan, SpanData}, + reload, EnvFilter, Layer, Registry, +}; + +use crate::{ + config::{Config, EnvFilterClone, LogFormat}, + error, + utils::error::Result, +}; + +/// Globally accessible metrics state +pub(crate) static METRICS: LazyLock = LazyLock::new(Metrics::new); + +/// Cleans up resources relating to observability when [`Drop`]ped +pub(crate) struct Guard { + /// Drop guard used to flush [`tracing_flame`] data on exit + #[allow(dead_code)] + flame_guard: Option>>, +} + +impl Drop for Guard { + fn drop(&mut self) { + opentelemetry::global::shutdown_tracer_provider(); + } +} + +/// We need to store a [`reload::Handle`] value, but can't name it's type +/// explicitly because the S type parameter depends on the subscriber's previous +/// layers. In our case, this includes unnameable 'impl Trait' types. +/// +/// This is fixed[1] in the unreleased tracing-subscriber from the master +/// branch, which removes the S parameter. 
Unfortunately can't use it without +/// pulling in a version of tracing that's incompatible with the rest of our +/// deps. +/// +/// To work around this, we define an trait without the S paramter that forwards +/// to the [`reload::Handle::reload`] method, and then store the handle as a +/// trait object. +/// +/// [1]: https://github.com/tokio-rs/tracing/pull/1035/commits/8a87ea52425098d3ef8f56d92358c2f6c144a28f +pub(crate) trait ReloadHandle { + /// Replace the layer with a new value. See [`reload::Handle::reload`]. + fn reload(&self, new_value: L) -> Result<(), reload::Error>; +} + +impl ReloadHandle for reload::Handle { + fn reload(&self, new_value: L) -> Result<(), reload::Error> { + reload::Handle::reload(self, new_value) + } +} + +/// Error returned from [`FilterReloadHandle::set_filter()`] +#[allow(clippy::missing_docs_in_private_items)] +#[derive(Debug, Error)] +pub(crate) enum SetFilterError { + #[error("invalid filter string")] + InvalidFilter(#[from] tracing_subscriber::filter::ParseError), + #[error("failed to reload filter layer")] + Reload(#[from] reload::Error), +} + +/// A wrapper around a tracing filter [reload handle][reload::Handle] that +/// remembers the filter string that was last set. +pub(crate) struct FilterReloadHandle { + /// The actual [`reload::Handle`] that can be used to modify the filter + /// [`Layer`] + inner: Box + Send + Sync>, + /// Filter string that was last applied to `inner` + current_filter: String, + /// Filter string that was initially loaded from the configuration + initial_filter: String, +} + +impl FilterReloadHandle { + /// Creates a new [`FilterReloadHandle`] from a filter string, returning the + /// filter layer itself and the handle that can be used to modify it. 
+ pub(crate) fn new( + filter: EnvFilterClone, + ) -> (impl tracing_subscriber::layer::Filter, Self) { + let (layer, handle) = reload::Layer::new(EnvFilter::from(&filter)); + let handle = Self { + inner: Box::new(handle), + current_filter: filter.0.clone(), + initial_filter: filter.0, + }; + (layer, handle) + } + + /// Sets the filter string for the linked filter layer. Can fail if the + /// filter string is invalid or when the link to the layer has been + /// broken. + pub(crate) fn set_filter( + &mut self, + filter: String, + ) -> Result<(), SetFilterError> { + self.inner.reload(filter.parse()?)?; + self.current_filter = filter; + Ok(()) + } + + /// Returns the filter string that the underlying filter layer is currently + /// configured for. + pub(crate) fn get_filter(&self) -> &str { + &self.current_filter + } + + /// Returns the filter string that the underlying filter layer was + /// initialized with. + pub(crate) fn get_initial_filter(&self) -> &str { + &self.initial_filter + } +} + +/// Collection of [`FilterReloadHandle`]s, allowing the filters for tracing +/// backends to be changed dynamically. Handles may be [`None`] if the backend +/// is disabled in the config. +#[allow(clippy::missing_docs_in_private_items)] +pub(crate) struct FilterReloadHandles { + pub(crate) traces: Option, + pub(crate) flame: Option, + pub(crate) log: Option, +} + +/// A kind of data that gets looked up +/// +/// See also [`Metrics::record_lookup`]. 
+// Keep variants sorted +#[allow(clippy::missing_docs_in_private_items)] +#[derive(Clone, Copy, AsRefStr, IntoStaticStr)] +pub(crate) enum Lookup { + AppserviceInRoom, + AuthChain, + CreateEventIdToShort, + CreateStateKeyToShort, + FederationDestination, + LastTimelineCount, + OurRealUsers, + Pdu, + ShortToEventId, + ShortToStateKey, + StateInfo, + StateKeyToShort, + VisibilityForServer, + VisibilityForUser, +} + +/// Locations where a [`Lookup`] value may be found +/// +/// Not all of these variants are used for each value of [`Lookup`]. +#[derive(Clone, Copy, AsRefStr, IntoStaticStr)] +pub(crate) enum FoundIn { + /// Found in cache + Cache, + /// Cache miss, but it was in the database. The cache has been updated. + Database, + /// Cache and database miss, but another server had it. The cache has been + /// updated. + Remote, + /// The entry could not be found anywhere. + Nothing, +} + +/// Wrapper for the creation of a `tracing` [`Layer`] and any associated opaque +/// data. +/// +/// Returns a no-op `None` layer if `enable` is `false`, otherwise calls the +/// given closure to construct the layer and associated data, then applies the +/// filter to the layer. 
+fn make_backend( + enable: bool, + filter: &EnvFilterClone, + init: impl FnOnce() -> Result<(L, T), error::Observability>, +) -> Result< + (impl Layer, Option, Option), + error::Observability, +> +where + L: Layer, + S: tracing::Subscriber + for<'span> LookupSpan<'span>, +{ + if !enable { + return Ok((None, None, None)); + } + + let (filter, handle) = FilterReloadHandle::new(filter.clone()); + let (layer, data) = init()?; + Ok((Some(layer.with_filter(filter)), Some(handle), Some(data))) +} + +/// Initialize observability +pub(crate) fn init( + config: &Config, +) -> Result<(Guard, FilterReloadHandles), error::Observability> { + let (traces_layer, traces_filter, _) = make_backend( + config.observability.traces.enable, + &config.observability.traces.filter, + || { + opentelemetry::global::set_text_map_propagator( + opentelemetry_jaeger_propagator::Propagator::new(), + ); + let mut exporter = opentelemetry_otlp::new_exporter().tonic(); + if let Some(endpoint) = &config.observability.traces.endpoint { + exporter = exporter.with_endpoint(endpoint); + } + let tracer_provider = opentelemetry_otlp::new_pipeline() + .tracing() + .with_trace_config( + opentelemetry_sdk::trace::Config::default().with_resource( + standard_resource( + config.observability.traces.service_name.clone(), + ), + ), + ) + .with_exporter(exporter) + .install_batch(opentelemetry_sdk::runtime::Tokio)?; + + // The passed value sets the library name, and `""` seems to be + // morally equivalent to passing `None`, which is probably fine + // because what other library is there to use for this anyway? + // + // Prior to opentelemetry v0.24, this value was set for us by the + // opentelemetry-otlp crate. Trying to automate getting the right + // values doesn't seem worth it, as alluded to above. 
+ let tracer = tracer_provider.tracer(""); + + opentelemetry::global::set_tracer_provider(tracer_provider); + + Ok((tracing_opentelemetry::layer().with_tracer(tracer), ())) + }, + )?; + + let (flame_layer, flame_filter, flame_guard) = make_backend( + config.observability.flame.enable, + &config.observability.flame.filter, + || { + let (flame_layer, guard) = + FlameLayer::with_file(&config.observability.flame.filename)?; + Ok((flame_layer.with_empty_samples(false), guard)) + }, + )?; + + let (log_layer, log_filter, _) = + make_backend(true, &config.observability.logs.filter, || { + /// Time format selection for `tracing_subscriber` at runtime + #[allow(clippy::missing_docs_in_private_items)] + enum TimeFormat { + SystemTime, + NoTime, + } + impl tracing_subscriber::fmt::time::FormatTime for TimeFormat { + fn format_time( + &self, + w: &mut tracing_subscriber::fmt::format::Writer<'_>, + ) -> std::fmt::Result { + match self { + TimeFormat::SystemTime => { + tracing_subscriber::fmt::time::SystemTime + .format_time(w) + } + TimeFormat::NoTime => Ok(()), + } + } + } + + let fmt_layer = tracing_subscriber::fmt::Layer::new() + .with_ansi(config.observability.logs.colors) + .with_timer(if config.observability.logs.timestamp { + TimeFormat::SystemTime + } else { + TimeFormat::NoTime + }); + let fmt_layer = match config.observability.logs.format { + LogFormat::Pretty => fmt_layer.pretty().boxed(), + LogFormat::Full => fmt_layer.boxed(), + LogFormat::Compact => fmt_layer.compact().boxed(), + LogFormat::Json => fmt_layer.json().boxed(), + }; + Ok((fmt_layer, ())) + })?; + + let subscriber = Registry::default() + .with(traces_layer) + .with(flame_layer) + .with(log_layer); + tracing::subscriber::set_global_default(subscriber)?; + + Ok(( + Guard { + flame_guard, + }, + FilterReloadHandles { + traces: traces_filter, + flame: flame_filter, + log: log_filter, + }, + )) +} + +/// Construct the standard [`Resource`] value to use for this service +fn standard_resource(service_name: 
String) -> Resource { + Resource::default() + .merge(&Resource::new([KeyValue::new("service.name", service_name)])) +} + +/// Holds state relating to metrics +pub(crate) struct Metrics { + /// Internal state for OpenTelemetry metrics + /// + /// We never directly read from [`SdkMeterProvider`], but it needs to + /// outlive all calls to `self.otel_state.0.gather()`, otherwise + /// metrics collection will fail. + otel_state: (prometheus::Registry, SdkMeterProvider), + + /// Histogram of HTTP requests + http_requests_histogram: opentelemetry::metrics::Histogram, + + /// Counts where data is found from + lookup: opentelemetry::metrics::Counter, + + /// Number of entries in an + /// [`OnDemandHashMap`](crate::utils::on_demand_hashmap::OnDemandHashMap) + on_demand_hashmap_size: opentelemetry::metrics::Gauge, +} + +impl Metrics { + /// Initializes metric-collecting and exporting facilities + fn new() -> Self { + // Metric names + let http_requests_histogram_name = "http.requests"; + + // Set up OpenTelemetry state + let registry = prometheus::Registry::new(); + let exporter = opentelemetry_prometheus::exporter() + .with_registry(registry.clone()) + .build() + .expect("exporter configuration should be valid"); + let provider = SdkMeterProvider::builder() + .with_reader(exporter) + .with_view( + new_view( + Instrument::new().name(http_requests_histogram_name), + Stream::new().aggregation( + Aggregation::ExplicitBucketHistogram { + boundaries: vec![ + 0., 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, + 0.08, 0.09, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, + 0.8, 0.9, 1., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 20., 30., 40., 50., + ], + record_min_max: true, + }, + ), + ) + .expect("view should be valid"), + ) + .with_resource(standard_resource(env!("CARGO_PKG_NAME").to_owned())) + .build(); + let meter = provider.meter(env!("CARGO_PKG_NAME")); + + // Define metrics + + let http_requests_histogram = meter + .f64_histogram(http_requests_histogram_name) + .with_unit("seconds") + 
.with_description("Histogram of HTTP requests") + .init(); + + let lookup = meter + .u64_counter("lookup") + .with_description("Counts where data is found from") + .init(); + + let on_demand_hashmap_size = meter + .u64_gauge("on_demand_hashmap_size") + .with_description("Number of entries in OnDemandHashMap") + .init(); + + Metrics { + otel_state: (registry, provider), + http_requests_histogram, + lookup, + on_demand_hashmap_size, + } + } + + /// Export metrics to a string suitable for consumption by e.g. Prometheus + pub(crate) fn export(&self) -> String { + prometheus::TextEncoder::new() + .encode_to_string(&self.otel_state.0.gather()) + .expect("should be able to encode metrics") + } + + /// Record that some data was found in a particular storage location + pub(crate) fn record_lookup(&self, lookup: Lookup, found_in: FoundIn) { + self.lookup.add( + 1, + &[ + KeyValue::new("lookup", <&str>::from(lookup)), + KeyValue::new("found_in", <&str>::from(found_in)), + ], + ); + } + + /// Record size of [`OnDemandHashMap`] + /// + /// [`OnDemandHashMap`]: crate::utils::on_demand_hashmap::OnDemandHashMap + pub(crate) fn record_on_demand_hashmap_size( + &self, + name: Arc, + size: usize, + ) { + self.on_demand_hashmap_size.record( + size.try_into().unwrap_or(u64::MAX), + &[KeyValue::new("name", name)], + ); + } +} + +/// Track HTTP metrics by converting this into an [`axum`] layer +pub(crate) async fn http_metrics_layer(req: Request, next: Next) -> Response { + /// Routes that should not be included in the metrics + static IGNORED_ROUTES: LazyLock> = + LazyLock::new(|| [(&Method::GET, "/metrics")].into_iter().collect()); + + let matched_path = + req.extensions().get::().map(|x| x.as_str().to_owned()); + + let method = req.method().to_owned(); + + match matched_path { + // Run the next layer if the route should be ignored + Some(matched_path) + if IGNORED_ROUTES.contains(&(&method, matched_path.as_str())) => + { + next.run(req).await + } + + // Run the next layer if the route 
is unknown + None => next.run(req).await, + + // Otherwise, run the next layer and record metrics + Some(matched_path) => { + let start = Instant::now(); + let resp = next.run(req).await; + let elapsed = start.elapsed(); + + let status_code = resp.status().as_str().to_owned(); + + let attrs = &[ + KeyValue::new("method", method.as_str().to_owned()), + KeyValue::new("path", matched_path), + KeyValue::new("status_code", status_code), + ]; + + METRICS + .http_requests_histogram + .record(elapsed.as_secs_f64(), attrs); + + resp + } + } +} + +/// Add `traceresponse` header if possible +/// +/// See also . +pub(crate) async fn traceresponse_layer(req: Request, next: Next) -> Response { + let mut resp = next.run(req).await; + + let ids = tracing::dispatcher::get_default(|dispatch| { + Span::current() + .id() + .and_then(|id| { + dispatch + .downcast_ref::() + .and_then(|x| x.span_data(&id)) + }) + .and_then(|x| { + x.extensions() + .get::() + .and_then(|x| x.builder.trace_id.zip(x.builder.span_id)) + }) + }); + + if let Some((trace_id, span_id)) = ids { + let headers = resp.headers_mut(); + + headers.insert( + "traceresponse", + format!( + "{:02x}-{}-{}-{:02x}", + 0, + trace_id, + span_id, + // Doesn't seem to be possible to get the SpanContext here, but + // this should be a fine default value + 0, + ) + .try_into() + .expect("traceresponse value should be a valid header value"), + ); + } + + resp +} + +/// Set up observability for CLI-oriented subcommands. +/// +/// Tracing spans and events will be sent to `stderr`. 
+pub(crate) fn init_for_cli( + log_format: LogFormat, + env_filter: EnvFilter, +) -> Result<(), SetGlobalDefaultError> { + let log_layer = + tracing_subscriber::fmt::Layer::new().with_writer(std::io::stderr); + + let log_layer = match log_format { + LogFormat::Pretty => log_layer.pretty().boxed(), + LogFormat::Full => log_layer.boxed(), + LogFormat::Compact => log_layer.compact().boxed(), + LogFormat::Json => log_layer.json().boxed(), + }; + + let log_layer = log_layer.with_filter(env_filter); + + let subscriber = Registry::default().with(log_layer); + + tracing::subscriber::set_global_default(subscriber) +} diff --git a/src/service.rs b/src/service.rs new file mode 100644 index 00000000..38d44941 --- /dev/null +++ b/src/service.rs @@ -0,0 +1,136 @@ +use std::sync::{Arc, OnceLock}; + +use crate::{observability::FilterReloadHandles, Config, Result}; + +pub(crate) mod account_data; +pub(crate) mod admin; +pub(crate) mod appservice; +pub(crate) mod globals; +pub(crate) mod key_backups; +pub(crate) mod media; +pub(crate) mod pdu; +pub(crate) mod pusher; +pub(crate) mod rooms; +pub(crate) mod sending; +pub(crate) mod transaction_ids; +pub(crate) mod uiaa; +pub(crate) mod users; + +static SERVICES: OnceLock<&'static Services> = OnceLock::new(); + +/// Convenient access to the global [`Services`] instance +pub(crate) fn services() -> &'static Services { + SERVICES.get().expect("`Services::install` should have been called first") +} + +pub(crate) struct Services { + pub(crate) appservice: appservice::Service, + pub(crate) pusher: pusher::Service, + pub(crate) rooms: rooms::Service, + pub(crate) transaction_ids: transaction_ids::Service, + pub(crate) uiaa: uiaa::Service, + pub(crate) users: users::Service, + pub(crate) account_data: account_data::Service, + pub(crate) admin: Arc, + pub(crate) globals: globals::Service, + pub(crate) key_backups: key_backups::Service, + pub(crate) media: media::Service, + pub(crate) sending: Arc, +} + +impl Services { + 
#[allow(clippy::too_many_lines)] + pub(crate) fn new< + D: appservice::Data + + pusher::Data + + rooms::Data + + transaction_ids::Data + + uiaa::Data + + users::Data + + account_data::Data + + globals::Data + + key_backups::Data + + media::Data + + sending::Data + + 'static, + >( + db: &'static D, + config: Config, + reload_handles: Option, + ) -> Result { + Ok(Self { + appservice: appservice::Service::new(db)?, + pusher: pusher::Service { + db, + }, + rooms: rooms::Service { + alias: rooms::alias::Service::new(db), + auth_chain: rooms::auth_chain::Service::new( + db, + config.cache.auth_chain, + ), + directory: db, + edus: rooms::edus::Service { + read_receipt: db, + typing: rooms::edus::typing::Service::new(), + }, + event_handler: rooms::event_handler::Service, + lazy_loading: rooms::lazy_loading::Service::new(db), + metadata: db, + outlier: db, + pdu_metadata: rooms::pdu_metadata::Service { + db, + }, + search: db, + short: rooms::short::Service::new( + db, + config.cache.short_eventid, + config.cache.eventid_short, + config.cache.statekey_short, + config.cache.short_statekey, + ), + state: rooms::state::Service { + db, + }, + state_accessor: rooms::state_accessor::Service::new( + db, + config.cache.server_visibility, + config.cache.user_visibility, + ), + state_cache: rooms::state_cache::Service::new(db), + state_compressor: rooms::state_compressor::Service::new( + db, + config.cache.state_info, + ), + timeline: rooms::timeline::Service::new(db, config.cache.pdu), + threads: rooms::threads::Service { + db, + }, + spaces: rooms::spaces::Service::new( + config.cache.roomid_spacechunk, + ), + user: db, + }, + transaction_ids: db, + uiaa: uiaa::Service::new(db), + users: users::Service::new(db), + account_data: account_data::Service::new(db), + admin: admin::Service::new(), + key_backups: db, + media: media::Service { + db, + }, + sending: sending::Service::new(db, &config), + + globals: globals::Service::new(db, config, reload_handles)?, + }) + } + + /// Installs 
`self` to be globally accessed via [`services`] + pub(crate) fn install(self) { + assert!( + SERVICES.set(Box::leak(Box::new(self))).is_ok(), + "Services::install was called more than once" + ); + } +} diff --git a/src/service/account_data.rs b/src/service/account_data.rs new file mode 100644 index 00000000..086d75b4 --- /dev/null +++ b/src/service/account_data.rs @@ -0,0 +1,318 @@ +use std::collections::HashMap; + +use ruma::{ + events::{ + AnyGlobalAccountDataEvent, AnyGlobalAccountDataEventContent, + AnyRoomAccountDataEvent, AnyRoomAccountDataEventContent, + GlobalAccountDataEventContent, GlobalAccountDataEventType, + RoomAccountDataEventContent, RoomAccountDataEventType, + StaticEventContent, + }, + serde::Raw, + RoomId, UserId, +}; +use serde::{Deserialize, Serialize}; + +use crate::{Error, Result}; + +mod data; + +pub(crate) use data::Data; + +pub(crate) struct Service { + pub(crate) db: &'static dyn Data, +} + +pub(crate) fn raw_global_event_to_parts( + event: &Raw, +) -> serde_json::Result<( + GlobalAccountDataEventType, + Raw, +)> { + #[derive(Deserialize)] + struct Parts { + #[serde(rename = "type")] + event_type: GlobalAccountDataEventType, + content: Raw, + } + + let parts = event.deserialize_as::()?; + Ok((parts.event_type, parts.content)) +} + +pub(crate) fn raw_global_event_from_parts( + event_type: &GlobalAccountDataEventType, + content: &Raw, +) -> Raw { + #[derive(Serialize)] + struct Parts<'a> { + #[serde(rename = "type")] + event_type: &'a GlobalAccountDataEventType, + content: &'a Raw, + } + + Raw::new(&Parts { + event_type, + content, + }) + .expect("json serialization should always succeed") + .cast::() +} + +pub(crate) fn raw_room_event_to_parts( + event: &Raw, +) -> serde_json::Result<( + RoomAccountDataEventType, + Raw, +)> { + #[derive(Deserialize)] + struct Parts { + #[serde(rename = "type")] + event_type: RoomAccountDataEventType, + content: Raw, + } + + let parts = event.deserialize_as::()?; + Ok((parts.event_type, parts.content)) +} + 
+pub(crate) fn raw_room_event_from_parts( + event_type: &RoomAccountDataEventType, + content: &Raw, +) -> Raw { + #[derive(Serialize)] + struct Parts<'a> { + #[serde(rename = "type")] + event_type: &'a RoomAccountDataEventType, + content: &'a Raw, + } + + Raw::new(&Parts { + event_type, + content, + }) + .expect("json serialization should always succeed") + .cast::() +} + +impl Service { + pub(crate) fn new(db: &'static dyn Data) -> Self { + Self { + db, + } + } + + /// Places one event in the global account data of the user and removes the + /// previous entry, with a static event type. + #[tracing::instrument(skip(self, user_id, content))] + pub(crate) fn update_global( + &self, + user_id: &UserId, + content: &Raw, + ) -> Result<()> + where + T: GlobalAccountDataEventContent + StaticEventContent, + { + let event_type = T::TYPE.into(); + let content = content.cast_ref::(); + let event = raw_global_event_from_parts(&event_type, content); + self.db.update(None, user_id, &event_type.to_string(), event.json()) + } + + /// Places one event in the global account data of the user and removes the + /// previous entry, with a dynamic event type. + /// + /// If the event type is known statically, [`Service::update_global`] should + /// be perferred for better type-safety. + #[tracing::instrument(skip(self, user_id, content))] + pub(crate) fn update_global_any( + &self, + user_id: &UserId, + event_type: &GlobalAccountDataEventType, + content: &Raw, + ) -> Result<()> { + let event = raw_global_event_from_parts(event_type, content); + self.db.update(None, user_id, &event_type.to_string(), event.json()) + } + + /// Places one event in the room account data of the user and removes the + /// previous entry for that room, with a static event type. 
+ #[tracing::instrument(skip(self, room_id, user_id, content))] + pub(crate) fn update_room( + &self, + room_id: &RoomId, + user_id: &UserId, + content: &Raw, + ) -> Result<()> + where + T: RoomAccountDataEventContent + StaticEventContent, + { + let event_type = T::TYPE.into(); + let content = content.cast_ref::(); + let event = raw_room_event_from_parts(&event_type, content); + self.db.update( + Some(room_id), + user_id, + &event_type.to_string(), + event.json(), + ) + } + + /// Places one event in the room account data of the user and removes the + /// previous entry for that room, with a dynamic event type. + /// + /// If the event type is known statically, [`Service::update_room`] should + /// be perferred for better type-safety. + #[tracing::instrument(skip(self, room_id, user_id, content))] + pub(crate) fn update_room_any( + &self, + room_id: &RoomId, + user_id: &UserId, + event_type: &RoomAccountDataEventType, + content: &Raw, + ) -> Result<()> { + let event = raw_room_event_from_parts(event_type, content); + self.db.update( + Some(room_id), + user_id, + &event_type.to_string(), + event.json(), + ) + } + + /// Searches the global account data for a specific static event type. + #[tracing::instrument(skip(self, user_id))] + pub(crate) fn get_global( + &self, + user_id: &UserId, + ) -> Result>> + where + T: GlobalAccountDataEventContent + StaticEventContent, + { + let Some(event) = self.db.get(None, user_id, T::TYPE)? else { + return Ok(None); + }; + let event = Raw::::from_json(event); + let (_, content) = raw_global_event_to_parts(&event).map_err(|_| { + Error::bad_database("Invalid account data event in db.") + })?; + Ok(Some(content.cast::())) + } + + /// Searches the global account data for a specific dynamic event type. + /// + /// If the event type is known statically, [`Service::get_global`] should + /// be perferred for better type-safety. 
+ #[tracing::instrument(skip(self, user_id, event_type))] + pub(crate) fn get_global_any( + &self, + user_id: &UserId, + event_type: &GlobalAccountDataEventType, + ) -> Result>> { + let Some(event) = + self.db.get(None, user_id, &event_type.to_string())? + else { + return Ok(None); + }; + let event = Raw::::from_json(event); + let (_, content) = raw_global_event_to_parts(&event).map_err(|_| { + Error::bad_database("Invalid account data event in db.") + })?; + Ok(Some(content)) + } + + /// Searches the room account data for a specific static event type. + #[tracing::instrument(skip(self, room_id, user_id))] + pub(crate) fn get_room( + &self, + room_id: &RoomId, + user_id: &UserId, + ) -> Result>> + where + T: RoomAccountDataEventContent + StaticEventContent, + { + let Some(event) = self.db.get(Some(room_id), user_id, T::TYPE)? else { + return Ok(None); + }; + let event = Raw::::from_json(event); + let (_, content) = raw_room_event_to_parts(&event).map_err(|_| { + Error::bad_database("Invalid account data event in db.") + })?; + Ok(Some(content.cast::())) + } + + /// Searches the room account data for a specific dynamic event type. + /// + /// If the event type is known statically, [`Service::get_room`] should + /// be perferred for better type-safety. + #[tracing::instrument(skip(self, room_id, user_id, event_type))] + pub(crate) fn get_room_any( + &self, + room_id: &RoomId, + user_id: &UserId, + event_type: &RoomAccountDataEventType, + ) -> Result>> { + let Some(event) = + self.db.get(Some(room_id), user_id, &event_type.to_string())? + else { + return Ok(None); + }; + let event = Raw::::from_json(event); + let (_, content) = raw_room_event_to_parts(&event).map_err(|_| { + Error::bad_database("Invalid account data event in db.") + })?; + Ok(Some(content)) + } + + /// Returns all changes to global account data that happened after `since`. + /// + /// When there have been multiple changes to the same event type, returned + /// map contains the most recent value. 
+ #[tracing::instrument(skip(self, user_id, since))] + pub(crate) fn global_changes_since( + &self, + user_id: &UserId, + since: u64, + ) -> Result< + HashMap< + GlobalAccountDataEventType, + Raw, + >, + > { + self.db + .changes_since(None, user_id, since)? + .into_values() + .map(|event| { + let event = Raw::::from_json(event); + raw_global_event_to_parts(&event).map_err(|_| { + Error::bad_database("Invalid account data event in db") + }) + }) + .collect() + } + + /// Returns all changes to room account data that happened after `since`. + /// + /// When there have been multiple changes to the same event type, returned + /// map contains the most recent value. + #[tracing::instrument(skip(self, room_id, user_id, since))] + pub(crate) fn room_changes_since( + &self, + user_id: &UserId, + room_id: &RoomId, + since: u64, + ) -> Result< + HashMap>, + > { + self.db + .changes_since(Some(room_id), user_id, since)? + .into_values() + .map(|event| { + let event = Raw::::from_json(event); + raw_room_event_to_parts(&event).map_err(|_| { + Error::bad_database("Invalid account data event in db") + }) + }) + .collect() + } +} diff --git a/src/service/account_data/data.rs b/src/service/account_data/data.rs index c7c92981..3616c322 100644 --- a/src/service/account_data/data.rs +++ b/src/service/account_data/data.rs @@ -1,35 +1,54 @@ use std::collections::HashMap; -use crate::Result; -use ruma::{ - events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, - serde::Raw, - RoomId, UserId, -}; +use ruma::{RoomId, UserId}; +use serde_json::value::RawValue; -pub trait Data: Send + Sync { - /// Places one event in the account data of the user and removes the previous entry. +use crate::Result; + +/// Unlike the service-level API, the database API for account data does not +/// distinguish between global and room events. Because there are no ruma types +/// that cover both, we use strings for the event types and raw json values for +/// the contents. 
+// +// TODO: once we have the ability to make db schema changes, we should consider +// storing only the content in the db, rather than the whole event object. +pub(crate) trait Data: Send + Sync { + /// Places one event in the account data of the user and removes the + /// previous entry. + /// + /// If `room_id` is `None`, set a global event, otherwise set a room event + /// in the specified room. fn update( &self, room_id: Option<&RoomId>, user_id: &UserId, - event_type: RoomAccountDataEventType, - data: &serde_json::Value, + event_type: &str, + data: &RawValue, ) -> Result<()>; /// Searches the account data for a specific kind. + /// + /// If `room_id` is `None`, search global events, otherwise search room + /// events in the specified room. fn get( &self, room_id: Option<&RoomId>, user_id: &UserId, - kind: RoomAccountDataEventType, - ) -> Result>>; + kind: &str, + ) -> Result>>; /// Returns all changes to the account data that happened after `since`. + /// + /// If `room_id` is `None`, read global events, otherwise read room events + /// in the specified room. + /// + /// Returned as a map from event type to event objects (containing both a + /// `type` and a `content` key). When there have been multiple changes to + /// the same event type, returned map contains the most recent value. fn changes_since( &self, room_id: Option<&RoomId>, user_id: &UserId, since: u64, - ) -> Result>>; + ) -> Result>>; } diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs deleted file mode 100644 index f9c49b1a..00000000 --- a/src/service/account_data/mod.rs +++ /dev/null @@ -1,53 +0,0 @@ -mod data; - -pub use data::Data; - -use ruma::{ - events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, - serde::Raw, - RoomId, UserId, -}; - -use std::collections::HashMap; - -use crate::Result; - -pub struct Service { - pub db: &'static dyn Data, -} - -impl Service { - /// Places one event in the account data of the user and removes the previous entry. 
- #[tracing::instrument(skip(self, room_id, user_id, event_type, data))] - pub fn update( - &self, - room_id: Option<&RoomId>, - user_id: &UserId, - event_type: RoomAccountDataEventType, - data: &serde_json::Value, - ) -> Result<()> { - self.db.update(room_id, user_id, event_type, data) - } - - /// Searches the account data for a specific kind. - #[tracing::instrument(skip(self, room_id, user_id, event_type))] - pub fn get( - &self, - room_id: Option<&RoomId>, - user_id: &UserId, - event_type: RoomAccountDataEventType, - ) -> Result>> { - self.db.get(room_id, user_id, event_type) - } - - /// Returns all changes to the account data that happened after `since`. - #[tracing::instrument(skip(self, room_id, user_id, since))] - pub fn changes_since( - &self, - room_id: Option<&RoomId>, - user_id: &UserId, - since: u64, - ) -> Result>> { - self.db.changes_since(room_id, user_id, since) - } -} diff --git a/src/service/admin/mod.rs b/src/service/admin.rs similarity index 50% rename from src/service/admin/mod.rs rename to src/service/admin.rs index 484fc134..a63c3133 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin.rs @@ -1,20 +1,18 @@ -use std::{ - collections::BTreeMap, - convert::{TryFrom, TryInto}, - sync::Arc, - time::Instant, -}; +use std::{collections::BTreeMap, fmt::Write, sync::Arc, time::Instant}; -use clap::Parser; +use clap::{Parser, Subcommand, ValueEnum}; use regex::Regex; use ruma::{ api::appservice::Registration, events::{ + push_rules::PushRulesEventContent, room::{ canonical_alias::RoomCanonicalAliasEventContent, create::RoomCreateEventContent, guest_access::{GuestAccess, RoomGuestAccessEventContent}, - history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + history_visibility::{ + HistoryVisibility, RoomHistoryVisibilityEventContent, + }, join_rules::{JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, message::RoomMessageEventContent, @@ -24,37 +22,43 @@ use ruma::{ }, 
TimelineEventType, }, - EventId, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, + room_version_rules::RoomVersionRules, + serde::Raw, + signatures::verify_json, + EventId, MilliSecondsSinceUnixEpoch, OwnedMxcUri, OwnedRoomId, + OwnedServerName, RoomId, ServerName, UserId, }; use serde_json::value::to_raw_value; use tokio::sync::{mpsc, Mutex, RwLock}; +use tracing::warn; +use super::pdu::PduBuilder; use crate::{ api::client_server::{leave_all_rooms, AUTO_GEN_PASSWORD_LENGTH}, services, - utils::{self, HtmlEscape}, + utils::{self, dbg_truncate_str, room_version::RoomVersion}, Error, PduEvent, Result, }; -use super::pdu::PduBuilder; - -#[cfg_attr(test, derive(Debug))] -#[derive(Parser)] -#[command(name = "@conduit:server.name:", version = env!("CARGO_PKG_VERSION"))] +#[derive(Debug, Parser)] +#[command(name = "@grapevine:server.name:", version = env!("CARGO_PKG_VERSION"))] enum AdminCommand { #[command(verbatim_doc_comment)] /// Register an appservice using its registration YAML /// - /// This command needs a YAML generated by an appservice (such as a bridge), - /// which must be provided in a Markdown code-block below the command. + /// This command needs a YAML generated by an appservice (such as a + /// bridge), which must be provided in a Markdown code-block below the + /// command. /// - /// Registering a new bridge using the ID of an existing bridge will replace - /// the old one. + /// Registering a new bridge using the ID of an existing bridge will + /// replace the old one. /// /// [commandbody]() /// # ``` /// # yaml content here /// # ``` + // Allowed because the doc comment gets parsed by our code later + #[allow(clippy::doc_markdown)] RegisterAppservice, /// Unregister an appservice using its ID @@ -94,13 +98,16 @@ enum AdminCommand { /// /// Users will not be removed from joined rooms by default. /// Can be overridden with --leave-rooms flag. 
- /// Removing a mass amount of users from a room may cause a significant amount of leave events. - /// The time to leave rooms may depend significantly on joined rooms and servers. + /// Removing a mass amount of users from a room may cause a significant + /// amount of leave events. The time to leave rooms may depend + /// significantly on joined rooms and servers. /// /// [commandbody]() /// # ``` /// # User list here /// # ``` + // Allowed because the doc comment gets parsed by our code later + #[allow(clippy::doc_markdown)] DeactivateAll { #[arg(short, long)] /// Remove users from their joined rooms @@ -110,7 +117,7 @@ enum AdminCommand { force: bool, }, - /// Get the auth_chain of a PDU + /// Get the `auth_chain` of a PDU GetAuthChain { /// An event ID (the $ character followed by the base64 reference hash) event_id: Box, @@ -126,26 +133,16 @@ enum AdminCommand { /// # ``` /// # PDU json content here /// # ``` + // Allowed because the doc comment gets parsed by our code later + #[allow(clippy::doc_markdown)] ParsePdu, - /// Retrieve and print a PDU by ID from the Conduit database + /// Retrieve and print a PDU by ID from the Grapevine database GetPdu { /// An event ID (a $ followed by the base64 reference hash) event_id: Box, }, - /// Print database memory usage statistics - MemoryUsage, - - /// Clears all of Conduit's database caches with index smaller than the amount - ClearDatabaseCaches { amount: u32 }, - - /// Clears all of Conduit's service caches with index smaller than the amount - ClearServiceCaches { amount: u32 }, - - /// Show configuration values - ShowConfig, - /// Reset user password ResetPassword { /// Username of the user for whom the password should be reset @@ -161,15 +158,43 @@ enum AdminCommand { }, /// Disables incoming federation handling for a room. - DisableRoom { room_id: Box }, + DisableRoom { + room_id: Box, + }, /// Enables incoming federation handling for a room again. 
- EnableRoom { room_id: Box }, + EnableRoom { + room_id: Box, + }, + + /// Delete media and all associated thumbnails. + DeleteMedia { + /// mxc:// URI of the media to delete + mxc: OwnedMxcUri, + }, + + /// Delete cached remote media from the database. + /// + /// This media may still be fetched and cached again in the future. + DeleteRemoteMedia { + /// Output the number of media objects that would be deleted, but do + /// not actually delete anything. + #[clap(short, long)] + dry_run: bool, + + /// If specified, only delete remote media from this origin. + /// + /// If not specified, all remote media will be deleted. + #[clap(long)] + origin: Option, + }, /// Verify json signatures /// [commandbody]() /// # ``` /// # json here /// # ``` + // Allowed because the doc comment gets parsed by our code later + #[allow(clippy::doc_markdown)] SignJson, /// Verify json signatures @@ -177,22 +202,68 @@ enum AdminCommand { /// # ``` /// # json here /// # ``` + // Allowed because the doc comment gets parsed by our code later + #[allow(clippy::doc_markdown)] VerifyJson, + + /// Dynamically change a tracing backend's filter string + TracingFilter { + #[command(subcommand)] + cmd: TracingFilterCommand, + }, +} + +#[derive(Debug, Subcommand)] +enum TracingFilterCommand { + Get { + backend: TracingBackend, + }, + Set { + backend: TracingBackend, + filter: String, + }, + Reset { + backend: TracingBackend, + }, +} + +impl TracingFilterCommand { + fn backend(&self) -> &TracingBackend { + match self { + TracingFilterCommand::Get { + backend, + } + | TracingFilterCommand::Set { + backend, + .. 
+ } + | TracingFilterCommand::Reset { + backend, + } => backend, + } + } } #[derive(Debug)] -pub enum AdminRoomEvent { +pub(crate) enum AdminRoomEvent { ProcessMessage(String), - SendMessage(RoomMessageEventContent), + SendMessage(Box), } -pub struct Service { - pub sender: mpsc::UnboundedSender, +pub(crate) struct Service { + pub(crate) sender: mpsc::UnboundedSender, receiver: Mutex>, } +#[derive(Debug, Clone, ValueEnum)] +enum TracingBackend { + Log, + Flame, + Traces, +} + impl Service { - pub fn build() -> Arc { + pub(crate) fn new() -> Arc { let (sender, receiver) = mpsc::unbounded_channel(); Arc::new(Self { sender, @@ -200,90 +271,112 @@ impl Service { }) } - pub fn start_handler(self: &Arc) { + pub(crate) fn start_handler(self: &Arc) { let self2 = Arc::clone(self); tokio::spawn(async move { - self2.handler().await; + let mut receiver = self2.receiver.lock().await; + + let Ok(Some(grapevine_room)) = self2.get_admin_room() else { + return; + }; + + loop { + let event = receiver + .recv() + .await + .expect("admin command channel has been closed"); + + Self::handle_event(&self2, event, &grapevine_room).await; + } }); } - async fn handler(&self) { - let mut receiver = self.receiver.lock().await; - // TODO: Use futures when we have long admin commands - //let mut futures = FuturesUnordered::new(); - - let conduit_user = UserId::parse(format!("@conduit:{}", services().globals.server_name())) - .expect("@conduit:server_name is valid"); - - if let Ok(Some(conduit_room)) = services().admin.get_admin_room() { - loop { - tokio::select! 
{ - Some(event) = receiver.recv() => { - let message_content = match event { - AdminRoomEvent::SendMessage(content) => content, - AdminRoomEvent::ProcessMessage(room_message) => self.process_admin_message(room_message).await - }; - - let mutex_state = Arc::clone( - services().globals - .roomid_mutex_state - .write() - .await - .entry(conduit_room.to_owned()) - .or_default(), - ); - - let state_lock = mutex_state.lock().await; - - services() - .rooms - .timeline - .build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomMessage, - content: to_raw_value(&message_content) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: None, - }, - &conduit_user, - &conduit_room, - &state_lock, - ) - .await.unwrap(); - } - } + #[tracing::instrument(skip(self, grapevine_room))] + async fn handle_event( + &self, + event: AdminRoomEvent, + grapevine_room: &OwnedRoomId, + ) { + let message_content = match event { + AdminRoomEvent::SendMessage(content) => *content, + AdminRoomEvent::ProcessMessage(room_message) => { + self.process_admin_message(room_message).await } - } - } + }; - pub fn process_message(&self, room_message: String) { - self.sender - .send(AdminRoomEvent::ProcessMessage(room_message)) + let room_token = services() + .globals + .roomid_mutex_state + .lock_key(grapevine_room.clone()) + .await; + + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: TimelineEventType::RoomMessage, + content: to_raw_value(&message_content) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: None, + }, + &services().globals.admin_bot_user_id, + &room_token, + ) + .await .unwrap(); } - pub fn send_message(&self, message_content: RoomMessageEventContent) { + #[tracing::instrument( + skip(self, room_message), + fields( + room_message = dbg_truncate_str(&room_message, 50).as_ref(), + ), + )] + pub(crate) fn process_message(&self, room_message: String) { + 
self.sender.send(AdminRoomEvent::ProcessMessage(room_message)).unwrap(); + } + + #[tracing::instrument(skip(self, message_content))] + pub(crate) fn send_message( + &self, + message_content: RoomMessageEventContent, + ) { self.sender - .send(AdminRoomEvent::SendMessage(message_content)) + .send(AdminRoomEvent::SendMessage(Box::new(message_content))) .unwrap(); } // Parse and process a message from the admin room - async fn process_admin_message(&self, room_message: String) -> RoomMessageEventContent { + #[tracing::instrument( + skip(self, room_message), + fields( + room_message = dbg_truncate_str(&room_message, 50).as_ref(), + ), + )] + async fn process_admin_message( + &self, + room_message: String, + ) -> RoomMessageEventContent { let mut lines = room_message.lines().filter(|l| !l.trim().is_empty()); - let command_line = lines.next().expect("each string has at least one line"); + let command_line = + lines.next().expect("each string has at least one line"); let body: Vec<_> = lines.collect(); - let admin_command = match self.parse_admin_command(command_line) { + let admin_command = match Self::parse_admin_command(command_line) { Ok(command) => command, Err(error) => { let server_name = services().globals.server_name(); - let message = error.replace("server.name", server_name.as_str()); - let html_message = self.usage_to_html(&message, server_name); + let message = + error.replace("server.name", server_name.as_str()); + let html_message = Self::usage_to_html(&message, server_name); - return RoomMessageEventContent::text_html(message, html_message); + return RoomMessageEventContent::text_html( + message, + html_message, + ); } }; @@ -291,22 +384,34 @@ impl Service { Ok(reply_message) => reply_message, Err(error) => { let markdown_message = format!( - "Encountered an error while handling the command:\n\ - ```\n{error}\n```", + "Encountered an error while handling the \ + command:\n```\n{error}\n```", ); let html_message = format!( - "Encountered an error while 
handling the command:\n\ -

\n{error}\n
", + "Encountered an error while handling the \ + command:\n
\n{error}\n
", ); - RoomMessageEventContent::text_html(markdown_message, html_message) + RoomMessageEventContent::text_html( + markdown_message, + html_message, + ) } } } // Parse chat messages from the admin room into an AdminCommand object - fn parse_admin_command(&self, command_line: &str) -> std::result::Result { - // Note: argv[0] is `@conduit:servername:`, which is treated as the main command + #[tracing::instrument( + skip(command_line), + fields( + command_line = dbg_truncate_str(command_line, 50).as_ref(), + ), + )] + fn parse_admin_command( + command_line: &str, + ) -> std::result::Result { + // Note: argv[0] is `@grapevine:servername:`, which is treated as the + // main command let mut argv: Vec<_> = command_line.split_whitespace().collect(); // Replace `help command` with `command --help` @@ -326,6 +431,8 @@ impl Service { AdminCommand::try_parse_from(argv).map_err(|error| error.to_string()) } + #[allow(clippy::too_many_lines)] + #[tracing::instrument(skip(self, body))] async fn process_admin_command( &self, command: AdminCommand, @@ -333,18 +440,26 @@ impl Service { ) -> Result { let reply_message_content = match command { AdminCommand::RegisterAppservice => { - if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" + if body.len() > 2 + && body[0].trim() == "```" + && body.last().unwrap().trim() == "```" { let appservice_config = body[1..body.len() - 1].join("\n"); - let parsed_config = serde_yaml::from_str::(&appservice_config); + let parsed_config = serde_yaml::from_str::( + &appservice_config, + ); match parsed_config { - Ok(yaml) => match services().appservice.register_appservice(yaml).await { - Ok(id) => RoomMessageEventContent::text_plain(format!( - "Appservice registered with ID: {id}." 
- )), - Err(e) => RoomMessageEventContent::text_plain(format!( - "Failed to register appservice: {e}" - )), + Ok(yaml) => match services() + .appservice + .register_appservice(yaml) + .await + { + Ok(id) => RoomMessageEventContent::text_plain( + format!("Appservice registered with ID: {id}."), + ), + Err(e) => RoomMessageEventContent::text_plain( + format!("Failed to register appservice: {e}"), + ), }, Err(e) => RoomMessageEventContent::text_plain(format!( "Could not parse appservice config: {e}" @@ -352,7 +467,8 @@ impl Service { } } else { RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", + "Expected code block in command body. Add --help for \ + details.", ) } } @@ -363,7 +479,9 @@ impl Service { .unregister_appservice(&appservice_identifier) .await { - Ok(()) => RoomMessageEventContent::text_plain("Appservice unregistered."), + Ok(()) => RoomMessageEventContent::text_plain( + "Appservice unregistered.", + ), Err(e) => RoomMessageEventContent::text_plain(format!( "Failed to unregister appservice: {e}" )), @@ -382,57 +500,74 @@ impl Service { let output = format!( "Rooms:\n{}", room_ids - .filter_map(|r| r.ok()) - .map(|id| id.to_string() - + "\tMembers: " - + &services() + .filter_map(std::result::Result::ok) + .map(|id| format!( + "{id}\tMembers: {}", + &services() .rooms .state_cache .room_joined_count(&id) .ok() .flatten() .unwrap_or(0) - .to_string()) + )) .collect::>() .join("\n") ); RoomMessageEventContent::text_plain(output) } - AdminCommand::ListLocalUsers => match services().users.list_local_users() { + AdminCommand::ListLocalUsers => match services() + .users + .list_local_users() + { Ok(users) => { - let mut msg: String = format!("Found {} local user account(s):\n", users.len()); + let mut msg: String = format!( + "Found {} local user account(s):\n", + users.len() + ); msg += &users.join("\n"); RoomMessageEventContent::text_plain(&msg) } Err(e) => RoomMessageEventContent::text_plain(e.to_string()), 
}, AdminCommand::IncomingFederation => { - let map = services().globals.roomid_federationhandletime.read().await; - let mut msg: String = format!("Handling {} incoming pdus:\n", map.len()); + let map = + services().globals.roomid_federationhandletime.read().await; + let mut msg: String = + format!("Handling {} incoming pdus:\n", map.len()); for (r, (e, i)) in map.iter() { let elapsed = i.elapsed(); - msg += &format!( - "{} {}: {}m{}s\n", - r, - e, + writeln!( + msg, + "{r} {e}: {}m{}s", elapsed.as_secs() / 60, elapsed.as_secs() % 60 - ); + ) + .expect("write to in-memory buffer should succeed"); } RoomMessageEventContent::text_plain(&msg) } - AdminCommand::GetAuthChain { event_id } => { + AdminCommand::GetAuthChain { + event_id, + } => { let event_id = Arc::::from(event_id); - if let Some(event) = services().rooms.timeline.get_pdu_json(&event_id)? { + if let Some(event) = + services().rooms.timeline.get_pdu_json(&event_id)? + { let room_id_str = event .get("room_id") .and_then(|val| val.as_str()) - .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + .ok_or_else(|| { + Error::bad_database("Invalid event in database") + })?; - let room_id = <&RoomId>::try_from(room_id_str).map_err(|_| { - Error::bad_database("Invalid room id field in event in database") - })?; + let room_id = + <&RoomId>::try_from(room_id_str).map_err(|_| { + Error::bad_database( + "Invalid room id field in event in database", + ) + })?; let start = Instant::now(); let count = services() .rooms @@ -449,29 +584,47 @@ impl Service { } } AdminCommand::ParsePdu => { - if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" + if body.len() > 2 + && body[0].trim() == "```" + && body.last().unwrap().trim() == "```" { let string = body[1..body.len() - 1].join("\n"); match serde_json::from_str(&string) { Ok(value) => { - match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) { + match ruma::signatures::reference_hash( + &value, + &RoomVersionRules::V6, 
+ ) { Ok(hash) => { - let event_id = EventId::parse(format!("${hash}")); + let event_id = + EventId::parse(format!("${hash}")); match serde_json::from_value::( - serde_json::to_value(value).expect("value is json"), + serde_json::to_value(value) + .expect("value is json"), ) { - Ok(pdu) => RoomMessageEventContent::text_plain(format!( - "EventId: {event_id:?}\n{pdu:#?}" - )), - Err(e) => RoomMessageEventContent::text_plain(format!( - "EventId: {event_id:?}\nCould not parse event: {e}" - )), + Ok(pdu) => { + RoomMessageEventContent::text_plain( + format!( + "EventId: {event_id:?}\\ + n{pdu:#?}" + ), + ) + } + Err(e) => { + RoomMessageEventContent::text_plain( + format!( + "EventId: {event_id:?}\\ + nCould not parse event: \ + {e}" + ), + ) + } } } - Err(e) => RoomMessageEventContent::text_plain(format!( - "Could not parse PDU JSON: {e:?}" - )), + Err(e) => RoomMessageEventContent::text_plain( + format!("Could not parse PDU JSON: {e:?}"), + ), } } Err(e) => RoomMessageEventContent::text_plain(format!( @@ -479,10 +632,14 @@ impl Service { )), } } else { - RoomMessageEventContent::text_plain("Expected code block in command body.") + RoomMessageEventContent::text_plain( + "Expected code block in command body.", + ) } } - AdminCommand::GetPdu { event_id } => { + AdminCommand::GetPdu { + event_id, + } => { let mut outlier = false; let mut pdu_json = services() .rooms @@ -490,7 +647,8 @@ impl Service { .get_non_outlier_pdu_json(&event_id)?; if pdu_json.is_none() { outlier = true; - pdu_json = services().rooms.timeline.get_pdu_json(&event_id)?; + pdu_json = + services().rooms.timeline.get_pdu_json(&event_id)?; } match pdu_json { Some(json) => { @@ -507,51 +665,37 @@ impl Service { json_text ), format!( - "

{}

\n
{}\n
\n", + "

{}

\n
{}\n
\n", if outlier { "PDU is outlier" } else { "PDU was accepted" }, - HtmlEscape(&json_text) + html_escape::encode_safe(&json_text) ), ) } - None => RoomMessageEventContent::text_plain("PDU not found."), + None => { + RoomMessageEventContent::text_plain("PDU not found.") + } } } - AdminCommand::MemoryUsage => { - let response1 = services().memory_usage().await; - let response2 = services().globals.db.memory_usage(); - - RoomMessageEventContent::text_plain(format!( - "Services:\n{response1}\n\nDatabase:\n{response2}" - )) - } - AdminCommand::ClearDatabaseCaches { amount } => { - services().globals.db.clear_caches(amount); - - RoomMessageEventContent::text_plain("Done.") - } - AdminCommand::ClearServiceCaches { amount } => { - services().clear_caches(amount).await; - - RoomMessageEventContent::text_plain("Done.") - } - AdminCommand::ShowConfig => { - // Construct and send the response - RoomMessageEventContent::text_plain(format!("{}", services().globals.config)) - } - AdminCommand::ResetPassword { username } => { + AdminCommand::ResetPassword { + username, + } => { let user_id = match UserId::parse_with_server_name( username.as_str().to_lowercase(), services().globals.server_name(), ) { Ok(id) => id, Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "The supplied username is not a valid username: {e}" - ))) + return Ok(RoomMessageEventContent::text_plain( + format!( + "The supplied username is not a valid \ + username: {e}" + ), + )) } }; @@ -560,39 +704,40 @@ impl Service { return Ok(RoomMessageEventContent::text_plain( "The specified user is not from this server!", )); - }; + } // Check if the specified user is valid if !services().users.exists(&user_id)? 
- || user_id - == UserId::parse_with_server_name( - "conduit", - services().globals.server_name(), - ) - .expect("conduit user exists") + || user_id == services().globals.admin_bot_user_id { return Ok(RoomMessageEventContent::text_plain( "The specified user does not exist!", )); } - let new_password = utils::random_string(AUTO_GEN_PASSWORD_LENGTH); + let new_password = + utils::random_string(AUTO_GEN_PASSWORD_LENGTH); match services() .users .set_password(&user_id, Some(new_password.as_str())) { Ok(()) => RoomMessageEventContent::text_plain(format!( - "Successfully reset the password for user {user_id}: {new_password}" + "Successfully reset the password for user {user_id}: \ + {new_password}" )), Err(e) => RoomMessageEventContent::text_plain(format!( "Couldn't reset the password for user {user_id}: {e}" )), } } - AdminCommand::CreateUser { username, password } => { - let password = - password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH)); + AdminCommand::CreateUser { + username, + password, + } => { + let password = password.unwrap_or_else(|| { + utils::random_string(AUTO_GEN_PASSWORD_LENGTH) + }); // Validate user id let user_id = match UserId::parse_with_server_name( username.as_str().to_lowercase(), @@ -600,9 +745,12 @@ impl Service { ) { Ok(id) => id, Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "The supplied username is not a valid username: {e}" - ))) + return Ok(RoomMessageEventContent::text_plain( + format!( + "The supplied username is not a valid \ + username: {e}" + ), + )) } }; if user_id.is_historical() { @@ -619,47 +767,138 @@ impl Service { services().users.create(&user_id, Some(password.as_str()))?; // Default to pretty displayname - let mut displayname = user_id.localpart().to_owned(); - - // If enabled append lightning bolt to display name (default true) - if services().globals.enable_lightning_bolt() { - displayname.push_str(" ⚡️"); - } + let displayname = user_id.localpart().to_owned(); services() 
.users .set_displayname(&user_id, Some(displayname))?; // Initial account data - services().account_data.update( - None, + services().account_data.update_global( &user_id, - ruma::events::GlobalAccountDataEventType::PushRules - .to_string() - .into(), - &serde_json::to_value(ruma::events::push_rules::PushRulesEvent { - content: ruma::events::push_rules::PushRulesEventContent { - global: ruma::push::Ruleset::server_default(&user_id), - }, + &Raw::new(&PushRulesEventContent { + global: ruma::push::Ruleset::server_default(&user_id), }) - .expect("to json value always works"), + .expect("json serialization should always succeed"), )?; - // we dont add a device since we're not the user, just the creator + // we dont add a device since we're not the user, just the + // creator // Inhibit login does not work for guests RoomMessageEventContent::text_plain(format!( - "Created user with user_id: {user_id} and password: {password}" + "Created user with user_id: {user_id} and password: \ + {password}" )) } - AdminCommand::DisableRoom { room_id } => { + AdminCommand::DisableRoom { + room_id, + } => { services().rooms.metadata.disable_room(&room_id, true)?; RoomMessageEventContent::text_plain("Room disabled.") } - AdminCommand::EnableRoom { room_id } => { + AdminCommand::EnableRoom { + room_id, + } => { services().rooms.metadata.disable_room(&room_id, false)?; RoomMessageEventContent::text_plain("Room enabled.") } + AdminCommand::DeleteMedia { + mxc, + } => { + services().media.delete(mxc).await?; + RoomMessageEventContent::text_plain("Media deleted.") + } + AdminCommand::DeleteRemoteMedia { + dry_run, + origin, + } => { + if origin.as_deref() == Some(services().globals.server_name()) { + return Ok(RoomMessageEventContent::text_plain( + "Specified origin is this server. Will not delete \ + anything.", + )); + } + + let mut count = 0; + + // The `media.iter_all()` iterator is not `Send`, so spawn it in + // a separate thread and send the results over a channel. 
+ let (tx, mut rx) = mpsc::channel(1); + tokio::task::spawn_blocking(move || { + for mxc in services().media.iter_all() { + if tx.blocking_send(mxc).is_err() { + break; + } + } + }); + + let mut failed_keys = 0; + let mut failed_deletes = 0; + while let Some(mxc) = rx.recv().await { + let Ok(mxc) = mxc else { + // Error details are logged by media::iter_all + failed_keys += 1; + continue; + }; + + let server_name = mxc.server_name(); + + if server_name == Ok(services().globals.server_name()) { + continue; + } + if let Some(origin) = &origin { + if server_name != Ok(origin) { + continue; + } + } + + // Technically this can be collapsed, but relying on && + // short-circuiting to avoid the delete side-effect is + // confusing. + #[allow(clippy::collapsible_if)] + if !dry_run { + if services().media.delete(mxc).await.is_err() { + // Error details are logged by media::delete + failed_deletes += 1; + continue; + } + } + count += 1; + } + + let mut message = if dry_run { + format!("{count} media objects would be deleted.") + } else { + format!("{count} media objects deleted.") + }; + + if failed_keys != 0 { + write!( + message, + "\n{failed_keys} corrupted media keys found in the \ + database." + ) + .unwrap(); + } + + if failed_deletes != 0 { + write!( + message, + "\n{failed_deletes} media objects failed to delete." + ) + .unwrap(); + } + + if failed_keys != 0 || failed_deletes != 0 { + write!( + message, + "\nCheck the server logs for more details." 
+ ) + .unwrap(); + } + RoomMessageEventContent::text_plain(message) + } AdminCommand::DeactivateUser { leave_rooms, user_id, @@ -669,13 +908,16 @@ impl Service { RoomMessageEventContent::text_plain(format!( "User {user_id} doesn't exist on this server" )) - } else if user_id.server_name() != services().globals.server_name() { + } else if user_id.server_name() + != services().globals.server_name() + { RoomMessageEventContent::text_plain(format!( "User {user_id} is not from this server" )) } else { RoomMessageEventContent::text_plain(format!( - "Making {user_id} leave all rooms before deactivation..." + "Making {user_id} leave all rooms before \ + deactivation..." )); services().users.deactivate_account(&user_id)?; @@ -689,10 +931,18 @@ impl Service { )) } } - AdminCommand::DeactivateAll { leave_rooms, force } => { - if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" + AdminCommand::DeactivateAll { + leave_rooms, + force, + } => { + if body.len() > 2 + && body[0].trim() == "```" + && body.last().unwrap().trim() == "```" { - let users = body.clone().drain(1..body.len() - 1).collect::>(); + let users = body + .clone() + .drain(1..body.len() - 1) + .collect::>(); let mut user_ids = Vec::new(); let mut remote_ids = Vec::new(); @@ -702,12 +952,14 @@ impl Service { for &user in &users { match <&UserId>::try_from(user) { Ok(user_id) => { - if user_id.server_name() != services().globals.server_name() { - remote_ids.push(user_id) + if user_id.server_name() + != services().globals.server_name() + { + remote_ids.push(user_id); } else if !services().users.exists(user_id)? 
{ - non_existant_ids.push(user_id) + non_existant_ids.push(user_id); } else { - user_ids.push(user_id) + user_ids.push(user_id); } } Err(_) => { @@ -719,33 +971,59 @@ impl Service { let mut markdown_message = String::new(); let mut html_message = String::new(); if !invalid_users.is_empty() { - markdown_message.push_str("The following user ids are not valid:\n```\n"); - html_message.push_str("The following user ids are not valid:\n
\n");
+                        markdown_message.push_str(
+                            "The following user ids are not valid:\n```\n",
+                        );
+                        html_message.push_str(
+                            "The following user ids are not valid:\n
\n",
+                        );
                         for invalid_user in invalid_users {
-                            markdown_message.push_str(&format!("{invalid_user}\n"));
-                            html_message.push_str(&format!("{invalid_user}\n"));
+                            writeln!(markdown_message, "{invalid_user}")
+                                .expect(
+                                    "write to in-memory buffer should succeed",
+                                );
+                            writeln!(html_message, "{invalid_user}").expect(
+                                "write to in-memory buffer should succeed",
+                            );
                         }
                         markdown_message.push_str("```\n\n");
                         html_message.push_str("
\n\n"); } if !remote_ids.is_empty() { - markdown_message - .push_str("The following users are not from this server:\n```\n"); - html_message - .push_str("The following users are not from this server:\n
\n");
+                        markdown_message.push_str(
+                            "The following users are not from this \
+                             server:\n```\n",
+                        );
+                        html_message.push_str(
+                            "The following users are not from this \
+                             server:\n
\n",
+                        );
                         for remote_id in remote_ids {
-                            markdown_message.push_str(&format!("{remote_id}\n"));
-                            html_message.push_str(&format!("{remote_id}\n"));
+                            writeln!(markdown_message, "{remote_id}").expect(
+                                "write to in-memory buffer should succeed",
+                            );
+                            writeln!(html_message, "{remote_id}").expect(
+                                "write to in-memory buffer should succeed",
+                            );
                         }
                         markdown_message.push_str("```\n\n");
                         html_message.push_str("
\n\n"); } if !non_existant_ids.is_empty() { - markdown_message.push_str("The following users do not exist:\n```\n"); - html_message.push_str("The following users do not exist:\n
\n");
+                        markdown_message.push_str(
+                            "The following users do not exist:\n```\n",
+                        );
+                        html_message.push_str(
+                            "The following users do not exist:\n
\n",
+                        );
                         for non_existant_id in non_existant_ids {
-                            markdown_message.push_str(&format!("{non_existant_id}\n"));
-                            html_message.push_str(&format!("{non_existant_id}\n"));
+                            writeln!(markdown_message, "{non_existant_id}")
+                                .expect(
+                                    "write to in-memory buffer should succeed",
+                                );
+                            writeln!(html_message, "{non_existant_id}").expect(
+                                "write to in-memory buffer should succeed",
+                            );
                         }
                         markdown_message.push_str("```\n\n");
                         html_message.push_str("
\n\n"); @@ -761,27 +1039,33 @@ impl Service { let mut admins = Vec::new(); if !force { - user_ids.retain(|&user_id| match services().users.is_admin(user_id) { - Ok(is_admin) => match is_admin { - true => { - admins.push(user_id.localpart()); - false + user_ids.retain(|&user_id| { + match services().users.is_admin(user_id) { + Ok(is_admin) => { + if is_admin { + admins.push(user_id.localpart()); + false + } else { + true + } } - false => true, - }, - Err(_) => false, - }) + Err(_) => false, + } + }); } for &user_id in &user_ids { - if services().users.deactivate_account(user_id).is_ok() { - deactivation_count += 1 + if services().users.deactivate_account(user_id).is_ok() + { + deactivation_count += 1; } } if leave_rooms { for &user_id in &user_ids { - let _ = leave_all_rooms(user_id).await; + if let Err(error) = leave_all_rooms(user_id).await { + warn!(%user_id, %error, "failed to leave one or more rooms"); + } } } @@ -790,16 +1074,25 @@ impl Service { "Deactivated {deactivation_count} accounts." )) } else { - RoomMessageEventContent::text_plain(format!("Deactivated {} accounts.\nSkipped admin accounts: {:?}. Use --force to deactivate admin accounts", deactivation_count, admins.join(", "))) + RoomMessageEventContent::text_plain(format!( + "Deactivated {} accounts.\nSkipped admin \ + accounts: {:?}. Use --force to deactivate admin \ + accounts", + deactivation_count, + admins.join(", ") + )) } } else { RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", + "Expected code block in command body. 
Add --help for \ + details.", ) } } AdminCommand::SignJson => { - if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" + if body.len() > 2 + && body[0].trim() == "```" + && body.last().unwrap().trim() == "```" { let string = body[1..body.len() - 1].join("\n"); match serde_json::from_str(&string) { @@ -810,20 +1103,26 @@ impl Service { &mut value, ) .expect("our request json is what ruma expects"); - let json_text = serde_json::to_string_pretty(&value) - .expect("canonical json is valid json"); + let json_text = + serde_json::to_string_pretty(&value) + .expect("canonical json is valid json"); RoomMessageEventContent::text_plain(json_text) } - Err(e) => RoomMessageEventContent::text_plain(format!("Invalid json: {e}")), + Err(e) => RoomMessageEventContent::text_plain(format!( + "Invalid json: {e}" + )), } } else { RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", + "Expected code block in command body. Add --help for \ + details.", ) } } AdminCommand::VerifyJson => { - if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" + if body.len() > 2 + && body[0].trim() == "```" + && body.last().unwrap().trim() == "```" { let string = body[1..body.len() - 1].join("\n"); match serde_json::from_str(&string) { @@ -833,47 +1132,145 @@ impl Service { services() .rooms .event_handler - .fetch_required_signing_keys(&value, &pub_key_map) + // Generally we shouldn't be checking against + // expired keys unless required, so in the admin + // room it might be best to not allow expired + // keys + .fetch_required_signing_keys( + &value, + &pub_key_map + ) .await?; - let pub_key_map = pub_key_map.read().await; - match ruma::signatures::verify_json(&pub_key_map, &value) { - Ok(_) => RoomMessageEventContent::text_plain("Signature correct"), - Err(e) => RoomMessageEventContent::text_plain(format!( + let mut expired_key_map = BTreeMap::new(); + let mut valid_key_map = 
BTreeMap::new(); + + for (server, keys) in pub_key_map.into_inner() { + if keys.valid_until_ts + > MilliSecondsSinceUnixEpoch::now() + { + valid_key_map.insert( + server, + keys.verify_keys + .into_iter() + .map(|(id, key)| (id, key.key)) + .collect(), + ); + } else { + expired_key_map.insert( + server, + keys.verify_keys + .into_iter() + .map(|(id, key)| (id, key.key)) + .collect(), + ); + } + } + + if verify_json(&valid_key_map, &value).is_ok() { + RoomMessageEventContent::text_plain( + "Signature correct", + ) + } else if let Err(e) = + verify_json(&expired_key_map, &value) + { + RoomMessageEventContent::text_plain(format!( "Signature verification failed: {e}" - )), + )) + } else { + RoomMessageEventContent::text_plain( + "Signature correct (with expired keys)", + ) } } - Err(e) => RoomMessageEventContent::text_plain(format!("Invalid json: {e}")), + Err(e) => RoomMessageEventContent::text_plain(format!( + "Invalid json: {e}" + )), } } else { RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", + "Expected code block in command body. Add --help for \ + details.", ) } } + AdminCommand::TracingFilter { + cmd, + } => { + let Some(handles) = &services().globals.reload_handles else { + return Ok(RoomMessageEventContent::text_plain( + "Reloading filters is disabled", + )); + }; + let mut handles = handles.write().await; + let handle = match cmd.backend() { + TracingBackend::Log => &mut handles.log, + TracingBackend::Flame => &mut handles.flame, + TracingBackend::Traces => &mut handles.traces, + }; + let Some(handle) = handle.as_mut() else { + return Ok(RoomMessageEventContent::text_plain( + "Backend is disabled", + )); + }; + + let filter = match cmd { + TracingFilterCommand::Set { + filter, + .. + } => filter, + TracingFilterCommand::Reset { + .. + } => handle.get_initial_filter().to_owned(), + TracingFilterCommand::Get { + .. 
+ } => { + return Ok(RoomMessageEventContent::text_plain( + format!( + "Current filter string: {}", + handle.get_filter() + ), + )); + } + }; + if let Err(e) = handle.set_filter(filter) { + return Ok(RoomMessageEventContent::text_plain(format!( + "Failed to reload filter: {e}" + ))); + } + + return Ok(RoomMessageEventContent::text_plain( + "Filter reloaded", + )); + } }; Ok(reply_message_content) } // Utility to turn clap's `--help` text to HTML. - fn usage_to_html(&self, text: &str, server_name: &ServerName) -> String { - // Replace `@conduit:servername:-subcmdname` with `@conduit:servername: subcmdname` + #[tracing::instrument(skip_all)] + fn usage_to_html(text: &str, server_name: &ServerName) -> String { + // Replace `@grapevine:servername:-subcmdname` with + // `@grapevine:servername: subcmdname` + let localpart = services().globals.admin_bot_user_id.localpart(); + let text = text.replace( - &format!("@conduit:{server_name}:-"), - &format!("@conduit:{server_name}: "), + &format!("@{localpart}:{server_name}:-"), + &format!("@{localpart}:{server_name}: "), ); - // For the conduit admin room, subcommands become main commands + // For the grapevine admin room, subcommands become main commands let text = text.replace("SUBCOMMAND", "COMMAND"); let text = text.replace("subcommand", "command"); - // Escape option names (e.g. ``) since they look like HTML tags + // Escape option names (e.g. ``) since they look like HTML + // tags let text = text.replace('<', "<").replace('>', ">"); // Italicize the first line (command name and version text) - let re = Regex::new("^(.*?)\n").expect("Regex compilation should not fail"); + let re = + Regex::new("^(.*?)\n").expect("Regex compilation should not fail"); let text = re.replace_all(&text, "$1\n"); // Unmerge wrapped lines @@ -888,38 +1285,24 @@ impl Service { .expect("Regex compilation should not fail"); let text = re.replace_all(&text, "$1: $4"); - // Look for a `[commandbody]()` tag. 
If it exists, use all lines below it that - // start with a `#` in the USAGE section. + // Look for a `[commandbody]()` tag. If it exists, use all lines below + // it that start with a `#` in the USAGE section. let mut text_lines: Vec<&str> = text.lines().collect(); - let mut command_body = String::new(); - - if let Some(line_index) = text_lines + let command_body = text_lines .iter() - .position(|line| *line == "[commandbody]()") - { - text_lines.remove(line_index); - - while text_lines - .get(line_index) - .map(|line| line.starts_with('#')) - .unwrap_or(false) - { - command_body += if text_lines[line_index].starts_with("# ") { - &text_lines[line_index][2..] - } else { - &text_lines[line_index][1..] - }; - command_body += "[nobr]\n"; - text_lines.remove(line_index); - } - } + .skip_while(|x| x != &&"[commandbody]()") + .skip(1) + .map_while(|&x| x.strip_prefix('#')) + .map(|x| x.strip_prefix(' ').unwrap_or(x)) + .collect::(); + text_lines.retain(|x| x != &"[commandbody]()"); let text = text_lines.join("\n"); // Improve the usage section let text = if command_body.is_empty() { // Wrap the usage line in code tags - let re = Regex::new("(?m)^USAGE:\n (@conduit:.*)$") + let re = Regex::new("(?m)^USAGE:\n (@grapevine:.*)$") .expect("Regex compilation should not fail"); re.replace_all(&text, "USAGE:\n$1").to_string() } else { @@ -927,8 +1310,11 @@ impl Service { // This makes the usage of e.g. `register-appservice` more accurate let re = Regex::new("(?m)^USAGE:\n (.*?)\n\n") .expect("Regex compilation should not fail"); - re.replace_all(&text, "USAGE:\n
$1[nobr]\n[commandbodyblock]
") - .replace("[commandbodyblock]", &command_body) + re.replace_all( + &text, + "USAGE:\n
$1[nobr]\n[commandbodyblock]
", + ) + .replace("[commandbodyblock]", &command_body) }; // Add HTML line-breaks @@ -940,49 +1326,36 @@ impl Service { /// Create the admin room. /// - /// Users in this room are considered admins by conduit, and the room can be - /// used to issue admin commands by talking to the server user inside it. + /// Users in this room are considered admins by grapevine, and the room can + /// be used to issue admin commands by talking to the server user inside + /// it. + #[allow(clippy::too_many_lines)] + #[tracing::instrument(skip(self))] pub(crate) async fn create_admin_room(&self) -> Result<()> { let room_id = RoomId::new(services().globals.server_name()); services().rooms.short.get_or_create_shortroomid(&room_id)?; - let mutex_state = Arc::clone( - services() - .globals - .roomid_mutex_state - .write() - .await - .entry(room_id.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; + let room_token = services() + .globals + .roomid_mutex_state + .lock_key(room_id.clone()) + .await; - // Create a user for the server - let conduit_user = - UserId::parse_with_server_name("conduit", services().globals.server_name()) - .expect("@conduit:server_name is valid"); + services().users.create(&services().globals.admin_bot_user_id, None)?; - services().users.create(&conduit_user, None)?; - - let room_version = services().globals.default_room_version(); - let mut content = match room_version { - RoomVersionId::V1 - | RoomVersionId::V2 - | RoomVersionId::V3 - | RoomVersionId::V4 - | RoomVersionId::V5 - | RoomVersionId::V6 - | RoomVersionId::V7 - | RoomVersionId::V8 - | RoomVersionId::V9 - | RoomVersionId::V10 => RoomCreateEventContent::new_v1(conduit_user.clone()), - RoomVersionId::V11 => RoomCreateEventContent::new_v11(), - _ => unreachable!("Validity of room version already checked"), + let room_version_id = services().globals.default_room_version(); + let room_version = RoomVersion::try_from(&room_version_id)?; + let mut content = if 
room_version.create_event_creator_prop { + RoomCreateEventContent::new_v1( + services().globals.admin_bot_user_id.clone(), + ) + } else { + RoomCreateEventContent::new_v11() }; content.federate = true; content.predecessor = None; - content.room_version = room_version; + content.room_version = room_version_id; // 1. The room create event services() @@ -991,18 +1364,18 @@ impl Service { .build_and_append_pdu( PduBuilder { event_type: TimelineEventType::RoomCreate, - content: to_raw_value(&content).expect("event is valid, we just created it"), + content: to_raw_value(&content) + .expect("event is valid, we just created it"), unsigned: None, - state_key: Some("".to_owned()), + state_key: Some(String::new()), redacts: None, }, - &conduit_user, - &room_id, - &state_lock, + &services().globals.admin_bot_user_id, + &room_token, ) .await?; - // 2. Make conduit bot join + // 2. Make grapevine bot join services() .rooms .timeline @@ -1021,18 +1394,19 @@ impl Service { }) .expect("event is valid, we just created it"), unsigned: None, - state_key: Some(conduit_user.to_string()), + state_key: Some( + services().globals.admin_bot_user_id.to_string(), + ), redacts: None, }, - &conduit_user, - &room_id, - &state_lock, + &services().globals.admin_bot_user_id, + &room_token, ) .await?; // 3. 
Power levels let mut users = BTreeMap::new(); - users.insert(conduit_user.clone(), 100.into()); + users.insert(services().globals.admin_bot_user_id.clone(), 100.into()); services() .rooms @@ -1046,12 +1420,11 @@ impl Service { }) .expect("event is valid, we just created it"), unsigned: None, - state_key: Some("".to_owned()), + state_key: Some(String::new()), redacts: None, }, - &conduit_user, - &room_id, - &state_lock, + &services().globals.admin_bot_user_id, + &room_token, ) .await?; @@ -1062,15 +1435,16 @@ impl Service { .build_and_append_pdu( PduBuilder { event_type: TimelineEventType::RoomJoinRules, - content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite)) - .expect("event is valid, we just created it"), + content: to_raw_value(&RoomJoinRulesEventContent::new( + JoinRule::Invite, + )) + .expect("event is valid, we just created it"), unsigned: None, - state_key: Some("".to_owned()), + state_key: Some(String::new()), redacts: None, }, - &conduit_user, - &room_id, - &state_lock, + &services().globals.admin_bot_user_id, + &room_token, ) .await?; @@ -1081,17 +1455,18 @@ impl Service { .build_and_append_pdu( PduBuilder { event_type: TimelineEventType::RoomHistoryVisibility, - content: to_raw_value(&RoomHistoryVisibilityEventContent::new( - HistoryVisibility::Shared, - )) + content: to_raw_value( + &RoomHistoryVisibilityEventContent::new( + HistoryVisibility::Shared, + ), + ) .expect("event is valid, we just created it"), unsigned: None, - state_key: Some("".to_owned()), + state_key: Some(String::new()), redacts: None, }, - &conduit_user, - &room_id, - &state_lock, + &services().globals.admin_bot_user_id, + &room_token, ) .await?; @@ -1107,32 +1482,33 @@ impl Service { )) .expect("event is valid, we just created it"), unsigned: None, - state_key: Some("".to_owned()), + state_key: Some(String::new()), redacts: None, }, - &conduit_user, - &room_id, - &state_lock, + &services().globals.admin_bot_user_id, + &room_token, ) .await?; // 5. 
Events implied by name and topic - let room_name = format!("{} Admin Room", services().globals.server_name()); + let room_name = + format!("{} Admin Room", services().globals.server_name()); services() .rooms .timeline .build_and_append_pdu( PduBuilder { event_type: TimelineEventType::RoomName, - content: to_raw_value(&RoomNameEventContent::new(room_name)) - .expect("event is valid, we just created it"), + content: to_raw_value(&RoomNameEventContent::new( + room_name, + )) + .expect("event is valid, we just created it"), unsigned: None, - state_key: Some("".to_owned()), + state_key: Some(String::new()), redacts: None, }, - &conduit_user, - &room_id, - &state_lock, + &services().globals.admin_bot_user_id, + &room_token, ) .await?; @@ -1143,23 +1519,23 @@ impl Service { PduBuilder { event_type: TimelineEventType::RoomTopic, content: to_raw_value(&RoomTopicEventContent { - topic: format!("Manage {}", services().globals.server_name()), + topic: format!( + "Manage {}", + services().globals.server_name() + ), }) .expect("event is valid, we just created it"), unsigned: None, - state_key: Some("".to_owned()), + state_key: Some(String::new()), redacts: None, }, - &conduit_user, - &room_id, - &state_lock, + &services().globals.admin_bot_user_id, + &room_token, ) .await?; // 6. 
Room alias - let alias: OwnedRoomAliasId = format!("#admins:{}", services().globals.server_name()) - .try_into() - .expect("#admins:server_name is a valid alias name"); + let alias = &services().globals.admin_bot_room_alias_id; services() .rooms @@ -1173,60 +1549,53 @@ impl Service { }) .expect("event is valid, we just created it"), unsigned: None, - state_key: Some("".to_owned()), + state_key: Some(String::new()), redacts: None, }, - &conduit_user, - &room_id, - &state_lock, + &services().globals.admin_bot_user_id, + &room_token, ) .await?; - services().rooms.alias.set_alias(&alias, &room_id)?; + services().rooms.alias.set_alias( + alias, + &room_id, + &services().globals.admin_bot_user_id, + )?; Ok(()) } /// Gets the room ID of the admin room /// - /// Errors are propagated from the database, and will have None if there is no admin room + /// Errors are propagated from the database, and will have None if there is + /// no admin room + // Allowed because this function uses `services()` + #[allow(clippy::unused_self)] pub(crate) fn get_admin_room(&self) -> Result> { - let admin_room_alias: Box = - format!("#admins:{}", services().globals.server_name()) - .try_into() - .expect("#admins:server_name is a valid alias name"); - services() .rooms .alias - .resolve_local_alias(&admin_room_alias) + .resolve_local_alias(&services().globals.admin_bot_room_alias_id) } - /// Invite the user to the conduit admin room. + /// Invite the user to the grapevine admin room. /// - /// In conduit, this is equivalent to granting admin privileges. + /// In grapevine, this is equivalent to granting admin privileges. + #[tracing::instrument(skip(self))] pub(crate) async fn make_user_admin( &self, user_id: &UserId, displayname: String, ) -> Result<()> { - if let Some(room_id) = services().admin.get_admin_room()? 
{ - let mutex_state = Arc::clone( - services() - .globals - .roomid_mutex_state - .write() - .await - .entry(room_id.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; + if let Some(room_id) = self.get_admin_room()? { + let room_token = services() + .globals + .roomid_mutex_state + .lock_key(room_id.clone()) + .await; // Use the server user to grant the new admin's power level - let conduit_user = - UserId::parse_with_server_name("conduit", services().globals.server_name()) - .expect("@conduit:server_name is valid"); - // Invite and join the real user services() .rooms @@ -1249,9 +1618,8 @@ impl Service { state_key: Some(user_id.to_string()), redacts: None, }, - &conduit_user, - &room_id, - &state_lock, + &services().globals.admin_bot_user_id, + &room_token, ) .await?; services() @@ -1276,14 +1644,16 @@ impl Service { redacts: None, }, user_id, - &room_id, - &state_lock, + &room_token, ) .await?; // Set power level let mut users = BTreeMap::new(); - users.insert(conduit_user.to_owned(), 100.into()); + users.insert( + services().globals.admin_bot_user_id.clone(), + 100.into(), + ); users.insert(user_id.to_owned(), 100.into()); services() @@ -1298,32 +1668,13 @@ impl Service { }) .expect("event is valid, we just created it"), unsigned: None, - state_key: Some("".to_owned()), + state_key: Some(String::new()), redacts: None, }, - &conduit_user, - &room_id, - &state_lock, + &services().globals.admin_bot_user_id, + &room_token, ) .await?; - - // Send welcome message - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: TimelineEventType::RoomMessage, - content: to_raw_value(&RoomMessageEventContent::text_html( - format!("## Thank you for trying out Conduit!\n\nConduit is currently in Beta. 
This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of available commands, send the following message in this room: `@conduit:{}: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`", services().globals.server_name()), - format!("

Thank you for trying out Conduit!

\n

Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.

\n

Helpful links:

\n
\n

Website: https://conduit.rs
Git and Documentation: https://gitlab.com/famedly/conduit
Report issues: https://gitlab.com/famedly/conduit/-/issues

\n
\n

For a list of available commands, send the following message in this room: @conduit:{}: --help

\n

Here are some rooms you can join (by typing the command):

\n

Conduit room (Ask questions and get notified on updates):
/join #conduit:fachschaften.org

\n

Conduit lounge (Off-topic, only Conduit users are allowed to join)
/join #conduit-lounge:conduit.rs

\n", services().globals.server_name()), - )) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - ).await?; } Ok(()) } @@ -1349,11 +1700,13 @@ mod test { } fn get_help_inner(input: &str) { - let error = AdminCommand::try_parse_from(["argv[0] doesn't matter", input]) - .unwrap_err() - .to_string(); + let error = + AdminCommand::try_parse_from(["argv[0] doesn't matter", input]) + .unwrap_err() + .to_string(); - // Search for a handful of keywords that suggest the help printed properly + // Search for a handful of keywords that suggest the help printed + // properly assert!(error.contains("Usage:")); assert!(error.contains("Commands:")); assert!(error.contains("Options:")); diff --git a/src/service/appservice/mod.rs b/src/service/appservice.rs similarity index 67% rename from src/service/appservice/mod.rs rename to src/service/appservice.rs index 9db6609e..8174d2e4 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice.rs @@ -1,29 +1,29 @@ -mod data; - use std::collections::BTreeMap; -pub use data::Data; - use futures_util::Future; use regex::RegexSet; use ruma::{ api::appservice::{Namespace, Registration}, - RoomAliasId, RoomId, UserId, + RoomAliasId, UserId, }; use tokio::sync::RwLock; -use crate::{services, Result}; +use crate::Result; + +mod data; + +pub(crate) use data::Data; /// Compiled regular expressions for a namespace. 
#[derive(Clone, Debug)] -pub struct NamespaceRegex { - pub exclusive: Option, - pub non_exclusive: Option, +pub(crate) struct NamespaceRegex { + pub(crate) exclusive: Option, + pub(crate) non_exclusive: Option, } impl NamespaceRegex { /// Checks if this namespace has rights to a namespace - pub fn is_match(&self, heystack: &str) -> bool { + pub(crate) fn is_match(&self, heystack: &str) -> bool { if self.is_exclusive_match(heystack) { return true; } @@ -37,7 +37,7 @@ impl NamespaceRegex { } /// Checks if this namespace has exlusive rights to a namespace - pub fn is_exclusive_match(&self, heystack: &str) -> bool { + pub(crate) fn is_exclusive_match(&self, heystack: &str) -> bool { if let Some(exclusive) = &self.exclusive { if exclusive.is_match(heystack) { return true; @@ -48,6 +48,8 @@ impl NamespaceRegex { } impl TryFrom> for NamespaceRegex { + type Error = regex::Error; + fn try_from(value: Vec) -> Result { let mut exclusive = vec![]; let mut non_exclusive = vec![]; @@ -73,32 +75,32 @@ impl TryFrom> for NamespaceRegex { }, }) } - - type Error = regex::Error; } /// Appservice registration combined with its compiled regular expressions. 
#[derive(Clone, Debug)] -pub struct RegistrationInfo { - pub registration: Registration, - pub users: NamespaceRegex, - pub aliases: NamespaceRegex, - pub rooms: NamespaceRegex, +pub(crate) struct RegistrationInfo { + pub(crate) registration: Registration, + pub(crate) users: NamespaceRegex, + pub(crate) aliases: NamespaceRegex, + pub(crate) rooms: NamespaceRegex, } impl RegistrationInfo { - pub fn is_user_match(&self, user_id: &UserId) -> bool { + pub(crate) fn is_user_match(&self, user_id: &UserId) -> bool { self.users.is_match(user_id.as_str()) || self.registration.sender_localpart == user_id.localpart() } - pub fn is_exclusive_user_match(&self, user_id: &UserId) -> bool { + pub(crate) fn is_exclusive_user_match(&self, user_id: &UserId) -> bool { self.users.is_exclusive_match(user_id.as_str()) || self.registration.sender_localpart == user_id.localpart() } } impl TryFrom for RegistrationInfo { + type Error = regex::Error; + fn try_from(value: Registration) -> Result { Ok(RegistrationInfo { users: value.namespaces.users.clone().try_into()?, @@ -107,17 +109,15 @@ impl TryFrom for RegistrationInfo { registration: value, }) } - - type Error = regex::Error; } -pub struct Service { - pub db: &'static dyn Data, +pub(crate) struct Service { + pub(crate) db: &'static dyn Data, registration_info: RwLock>, } impl Service { - pub fn build(db: &'static dyn Data) -> Result { + pub(crate) fn new(db: &'static dyn Data) -> Result { let mut registration_info = BTreeMap::new(); // Inserting registrations into cache for appservice in db.all()? { @@ -135,12 +135,15 @@ impl Service { registration_info: RwLock::new(registration_info), }) } + /// Registers an appservice and returns the ID to the caller. 
- pub async fn register_appservice(&self, yaml: Registration) -> Result { + #[tracing::instrument(skip(self, yaml), fields(appservice_id = yaml.id))] + pub(crate) async fn register_appservice( + &self, + yaml: Registration, + ) -> Result { //TODO: Check for collisions between exclusive appservice namespaces - services() - .appservice - .registration_info + self.registration_info .write() .await .insert(yaml.id.clone(), yaml.clone().try_into()?); @@ -153,19 +156,23 @@ impl Service { /// # Arguments /// /// * `service_name` - the name you send to register the service previously - pub async fn unregister_appservice(&self, service_name: &str) -> Result<()> { - services() - .appservice - .registration_info - .write() - .await - .remove(service_name) - .ok_or_else(|| crate::Error::AdminCommand("Appservice not found"))?; + #[tracing::instrument(skip(self))] + pub(crate) async fn unregister_appservice( + &self, + service_name: &str, + ) -> Result<()> { + self.registration_info.write().await.remove(service_name).ok_or_else( + || crate::Error::AdminCommand("Appservice not found"), + )?; self.db.unregister_appservice(service_name) } - pub async fn get_registration(&self, id: &str) -> Option { + #[tracing::instrument(skip(self))] + pub(crate) async fn get_registration( + &self, + id: &str, + ) -> Option { self.registration_info .read() .await @@ -174,16 +181,15 @@ impl Service { .map(|info| info.registration) } - pub async fn iter_ids(&self) -> Vec { - self.registration_info - .read() - .await - .keys() - .cloned() - .collect() + pub(crate) async fn iter_ids(&self) -> Vec { + self.registration_info.read().await.keys().cloned().collect() } - pub async fn find_from_token(&self, token: &str) -> Option { + #[tracing::instrument(skip(self))] + pub(crate) async fn find_from_token( + &self, + token: &str, + ) -> Option { self.read() .await .values() @@ -192,7 +198,8 @@ impl Service { } // Checks if a given user id matches any exclusive appservice regex - pub async fn 
is_exclusive_user_id(&self, user_id: &UserId) -> bool { + #[tracing::instrument(skip(self), ret(level = "trace"))] + pub(crate) async fn is_exclusive_user_id(&self, user_id: &UserId) -> bool { self.read() .await .values() @@ -200,25 +207,22 @@ impl Service { } // Checks if a given room alias matches any exclusive appservice regex - pub async fn is_exclusive_alias(&self, alias: &RoomAliasId) -> bool { + #[tracing::instrument(skip(self), ret(level = "trace"))] + pub(crate) async fn is_exclusive_alias(&self, alias: &RoomAliasId) -> bool { self.read() .await .values() .any(|info| info.aliases.is_exclusive_match(alias.as_str())) } - // Checks if a given room id matches any exclusive appservice regex - pub async fn is_exclusive_room_id(&self, room_id: &RoomId) -> bool { - self.read() - .await - .values() - .any(|info| info.rooms.is_exclusive_match(room_id.as_str())) - } - - pub fn read( + pub(crate) fn read( &self, - ) -> impl Future>> - { + ) -> impl Future< + Output = tokio::sync::RwLockReadGuard< + '_, + BTreeMap, + >, + > { self.registration_info.read() } } diff --git a/src/service/appservice/data.rs b/src/service/appservice/data.rs index ab19a50c..84e2bd90 100644 --- a/src/service/appservice/data.rs +++ b/src/service/appservice/data.rs @@ -2,7 +2,7 @@ use ruma::api::appservice::Registration; use crate::Result; -pub trait Data: Send + Sync { +pub(crate) trait Data: Send + Sync { /// Registers an appservice and returns the ID to the caller fn register_appservice(&self, yaml: Registration) -> Result; @@ -15,7 +15,9 @@ pub trait Data: Send + Sync { fn get_registration(&self, id: &str) -> Result>; - fn iter_ids<'a>(&'a self) -> Result> + 'a>>; + fn iter_ids<'a>( + &'a self, + ) -> Result> + 'a>>; fn all(&self) -> Result>; } diff --git a/src/service/globals.rs b/src/service/globals.rs new file mode 100644 index 00000000..c0dff71e --- /dev/null +++ b/src/service/globals.rs @@ -0,0 +1,671 @@ +use std::{ + collections::{BTreeMap, HashMap}, + fs, + future::{self, Future}, + 
iter, + net::{IpAddr, SocketAddr}, + path::PathBuf, + sync::{ + atomic::{self, AtomicBool}, + Arc, RwLock as StdRwLock, + }, + time::{Duration, Instant}, +}; + +use base64::{engine::general_purpose, Engine as _}; +use reqwest::dns::{Addrs, Name, Resolve, Resolving}; +use ruma::{ + api::federation::discovery::ServerSigningKeys, + events::{ + push_rules::PushRulesEventContent, + room::message::RoomMessageEventContent, + }, + push::Ruleset, + serde::{Base64, Raw}, + DeviceId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomAliasId, + OwnedRoomId, OwnedServerName, OwnedUserId, RoomAliasId, RoomVersionId, + ServerName, UInt, UserId, +}; +use tokio::sync::{broadcast, Mutex, RwLock, Semaphore}; +use tracing::{error, warn, Instrument}; +use trust_dns_resolver::TokioAsyncResolver; + +use crate::{ + api::server_server::FedDest, + config::{MediaBackendConfig, MediaFilesystemConfig}, + observability::FilterReloadHandles, + service::media::MediaFileKey, + services, + utils::on_demand_hashmap::{OnDemandHashMap, TokenSet}, + Config, Error, Result, +}; + +mod data; + +pub(crate) use data::{Data, SigningKeys}; + +type WellKnownMap = HashMap; +type TlsNameMap = HashMap, u16)>; +// Time if last failed try, number of failed tries +type RateLimitState = (Instant, u32); + +// Markers for +// [`Service::roomid_mutex_state`]/[`Service::roomid_mutex_insert`]/ +// [`Service::roomid_mutex_federation`] +pub(crate) mod marker { + pub(crate) enum State {} + pub(crate) enum Insert {} + pub(crate) enum Federation {} +} + +pub(crate) struct Service { + pub(crate) db: &'static dyn Data, + pub(crate) reload_handles: Option>>, + + // actual_destination, host + pub(crate) actual_destination_cache: Arc>, + pub(crate) tls_name_override: Arc>, + pub(crate) config: Config, + keypair: Arc, + dns_resolver: Arc, + jwt_decoding_key: Option, + federation_client: reqwest::Client, + default_client: reqwest::Client, + pub(crate) stable_room_versions: Vec, + pub(crate) admin_bot_user_id: OwnedUserId, + 
pub(crate) admin_bot_room_alias_id: OwnedRoomAliasId, + pub(crate) bad_event_ratelimiter: + Arc>>, + pub(crate) bad_signature_ratelimiter: + Arc, RateLimitState>>>, + pub(crate) bad_query_ratelimiter: + Arc>>, + pub(crate) servername_ratelimiter: + OnDemandHashMap, + pub(crate) roomid_mutex_insert: TokenSet, + pub(crate) roomid_mutex_state: TokenSet, + + // this lock will be held longer + pub(crate) roomid_mutex_federation: + TokenSet, + pub(crate) roomid_federationhandletime: + RwLock>, + pub(crate) stateres_mutex: Arc>, + pub(crate) rotate: RotationHandler, + + pub(crate) shutdown: AtomicBool, +} + +/// Handles "rotation" of long-polling requests. "Rotation" in this context is +/// similar to "rotation" of log files and the like. +/// +/// This is utilized to have sync workers return early and release read locks on +/// the database. +pub(crate) struct RotationHandler( + broadcast::Sender<()>, + // TODO: Determine if it's safe to delete this field. I'm not deleting it + // right now because I'm unsure what implications that would have for how + // the sender expects to work. + #[allow(dead_code)] broadcast::Receiver<()>, +); + +impl RotationHandler { + pub(crate) fn new() -> Self { + let (s, r) = broadcast::channel(1); + Self(s, r) + } + + pub(crate) fn watch(&self) -> impl Future { + let mut r = self.0.subscribe(); + + async move { + r.recv().await.expect("should receive a message"); + } + } + + pub(crate) fn fire(&self) { + self.0.send(()).expect("should be able to send message"); + } +} + +impl Default for RotationHandler { + fn default() -> Self { + Self::new() + } +} + +/// Wrapper around [`trust_dns_resolver`]'s [`TokioAsyncResolver`] that can be +/// used with reqwest. 
+pub(crate) struct DefaultResolver { + inner: Arc, +} + +impl DefaultResolver { + fn new(inner: Arc) -> Self { + DefaultResolver { + inner, + } + } + + fn resolve_inner(&self, name: Name) -> Resolving { + let inner = Arc::clone(&self.inner); + let future = async move { + let lookup = inner.lookup_ip(name.as_str()).await?; + let addrs: Addrs = + Box::new(lookup.into_iter().map(|ip| SocketAddr::new(ip, 0))); + Ok(addrs) + }; + Box::pin(future.in_current_span()) + } +} + +impl Resolve for DefaultResolver { + #[tracing::instrument(skip(self))] + fn resolve(&self, name: Name) -> Resolving { + self.resolve_inner(name) + } +} + +/// Resolver used for outgoing requests to the federation API. +/// +/// Hostnames that have been mapped to a different domain by SRV records in +/// [server discovery][1] are resolved to the SRV record target. This is done to +/// get reqwest to check the TLS certificate against the correct hostname +/// required in steps 3.3, 3.4, and 4 of the server discovery spec. +/// +/// [1]: https://spec.matrix.org/v1.12/server-server-api/#server-discovery +pub(crate) struct FederationResolver { + inner: Arc, + overrides: Arc>, +} + +impl FederationResolver { + pub(crate) fn new( + inner: Arc, + overrides: Arc>, + ) -> Self { + FederationResolver { + inner, + overrides, + } + } +} + +impl Resolve for FederationResolver { + #[tracing::instrument(skip(self))] + fn resolve(&self, name: Name) -> Resolving { + self.overrides + .read() + .unwrap() + .get(name.as_str()) + .and_then(|(override_name, port)| { + override_name.first().map(|first_name| { + let x: Box + Send> = + Box::new(iter::once(SocketAddr::new( + *first_name, + *port, + ))); + let x: Resolving = Box::pin(future::ready(Ok(x))); + x + }) + }) + .unwrap_or_else(|| self.inner.resolve_inner(name)) + } +} + +impl Service { + #[tracing::instrument(skip_all)] + // there are a lot of fields to initialize, not easy to break up but logic + // is fairly linear + #[allow(clippy::too_many_lines)] + pub(crate) 
fn new( + db: &'static dyn Data, + config: Config, + reload_handles: Option, + ) -> Result { + let keypair = db.load_keypair(); + + let keypair = match keypair { + Ok(k) => k, + Err(e) => { + error!("Keypair invalid. Deleting..."); + db.remove_keypair()?; + return Err(e); + } + }; + + let tls_name_override = Arc::new(StdRwLock::new(TlsNameMap::new())); + let dns_resolver = Arc::new( + TokioAsyncResolver::tokio_from_system_conf().map_err(|e| { + error!( + "Failed to set up trust dns resolver with system config: \ + {}", + e + ); + Error::bad_config( + "Failed to set up trust dns resolver with system config.", + ) + })?, + ); + let default_resolver = + Arc::new(DefaultResolver::new(Arc::clone(&dns_resolver))); + let federation_resolver = Arc::new(FederationResolver::new( + Arc::clone(&default_resolver), + Arc::clone(&tls_name_override), + )); + + let jwt_decoding_key = config.jwt_secret.as_ref().map(|secret| { + jsonwebtoken::DecodingKey::from_secret(secret.as_bytes()) + }); + + let default_client = reqwest_client_builder(&config)? + .dns_resolver(default_resolver) + .build()?; + + if config.federation.allow_invalid_tls_certificates { + warn!( + "TLS certificate validation is disabled, this is insecure and \ + should not be used in production" + ); + } + + let federation_client = reqwest_client_builder(&config)? 
+ .dns_resolver(federation_resolver) + .build()?; + + // Supported and stable room versions + let stable_room_versions = vec![ + RoomVersionId::V6, + RoomVersionId::V7, + RoomVersionId::V8, + RoomVersionId::V9, + RoomVersionId::V10, + RoomVersionId::V11, + ]; + + let admin_bot_user_id = UserId::parse(format!( + "@{}:{}", + if config.conduit_compat { + "conduit" + } else { + "grapevine" + }, + config.server_name, + )) + .expect("admin bot user ID should be valid"); + + let admin_bot_room_alias_id = + RoomAliasId::parse(format!("#admins:{}", config.server_name)) + .expect("admin bot room alias ID should be valid"); + + let mut s = Self { + db, + config, + reload_handles: reload_handles.map(|h| Arc::new(RwLock::new(h))), + keypair: Arc::new(keypair), + dns_resolver, + actual_destination_cache: Arc::new( + RwLock::new(WellKnownMap::new()), + ), + tls_name_override, + federation_client, + default_client, + jwt_decoding_key, + stable_room_versions, + admin_bot_user_id, + admin_bot_room_alias_id, + bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), + bad_signature_ratelimiter: Arc::new(RwLock::new(HashMap::new())), + bad_query_ratelimiter: Arc::new(RwLock::new(HashMap::new())), + servername_ratelimiter: OnDemandHashMap::new( + "servername_ratelimiter".to_owned(), + ), + roomid_mutex_state: TokenSet::new("roomid_mutex_state".to_owned()), + roomid_mutex_insert: TokenSet::new( + "roomid_mutex_insert".to_owned(), + ), + roomid_mutex_federation: TokenSet::new( + "roomid_mutex_federation".to_owned(), + ), + roomid_federationhandletime: RwLock::new(HashMap::new()), + stateres_mutex: Arc::new(Mutex::new(())), + rotate: RotationHandler::new(), + shutdown: AtomicBool::new(false), + }; + + fs::create_dir_all(s.get_media_folder())?; + + if !s.supported_room_versions().contains(&s.config.default_room_version) + { + error!(config=?s.config.default_room_version, fallback=?crate::config::default_default_room_version(), "Room version in config isn't supported, falling back to 
default version"); + s.config.default_room_version = + crate::config::default_default_room_version(); + } + + Ok(s) + } + + /// Check if `server_name` in the DB and config differ, return error if so + /// + /// Matrix resource ownership is based on the server name; changing it + /// requires recreating the database from scratch. This check needs to be + /// done before background tasks are started to avoid data races. + // Allowed because this function calls `services()` + #[allow(clippy::unused_self)] + pub(crate) fn err_if_server_name_changed( + &self, + ) -> Result<(), crate::error::ServerNameChanged> { + use crate::error::ServerNameChanged as Error; + + if services() + .users + .count() + .map(|x| x > 0) + .map_err(Error::NonZeroUsers)? + { + let admin_bot = self.admin_bot_user_id.as_ref(); + if !services() + .users + .exists(admin_bot) + .map_err(Error::AdminBotExists)? + { + return Err(Error::Renamed); + } + } + + Ok(()) + } + + /// Returns this server's keypair. + pub(crate) fn keypair(&self) -> &ruma::signatures::Ed25519KeyPair { + &self.keypair + } + + /// Returns a reqwest client which can be used to send requests + pub(crate) fn default_client(&self) -> reqwest::Client { + // Client is cheap to clone (Arc wrapper) and avoids lifetime issues + self.default_client.clone() + } + + /// Returns a client used for resolving .well-knowns + pub(crate) fn federation_client(&self) -> reqwest::Client { + // Client is cheap to clone (Arc wrapper) and avoids lifetime issues + self.federation_client.clone() + } + + #[tracing::instrument(skip(self))] + pub(crate) fn next_count(&self) -> Result { + self.db.next_count() + } + + #[tracing::instrument(skip(self))] + pub(crate) fn current_count(&self) -> Result { + self.db.current_count() + } + + pub(crate) async fn watch( + &self, + user_id: &UserId, + device_id: &DeviceId, + ) -> Result<()> { + self.db.watch(user_id, device_id).await + } + + pub(crate) fn cleanup(&self) -> Result<()> { + self.db.cleanup() + } + + 
pub(crate) fn server_name(&self) -> &ServerName { + self.config.server_name.as_ref() + } + + pub(crate) fn max_request_size(&self) -> UInt { + self.config.max_request_size + } + + pub(crate) fn max_fetch_prev_events(&self) -> u16 { + self.config.federation.max_fetch_prev_events + } + + pub(crate) fn allow_registration(&self) -> bool { + self.config.allow_registration + } + + pub(crate) fn allow_encryption(&self) -> bool { + self.config.allow_encryption + } + + pub(crate) fn allow_federation(&self) -> bool { + self.config.federation.enable + } + + pub(crate) fn allow_room_creation(&self) -> bool { + self.config.allow_room_creation + } + + pub(crate) fn default_room_version(&self) -> RoomVersionId { + self.config.default_room_version.clone() + } + + pub(crate) fn trusted_servers(&self) -> &[OwnedServerName] { + &self.config.federation.trusted_servers + } + + pub(crate) fn dns_resolver(&self) -> &TokioAsyncResolver { + &self.dns_resolver + } + + pub(crate) fn jwt_decoding_key( + &self, + ) -> Option<&jsonwebtoken::DecodingKey> { + self.jwt_decoding_key.as_ref() + } + + pub(crate) fn turn_password(&self) -> &String { + &self.config.turn.password + } + + pub(crate) fn turn_ttl(&self) -> u64 { + self.config.turn.ttl + } + + pub(crate) fn turn_uris(&self) -> &[String] { + &self.config.turn.uris + } + + pub(crate) fn turn_username(&self) -> &String { + &self.config.turn.username + } + + pub(crate) fn turn_secret(&self) -> &String { + &self.config.turn.secret + } + + pub(crate) fn emergency_password(&self) -> Option<&str> { + self.config.emergency_password.as_deref() + } + + /// If the emergency password option is set, attempts to set the emergency + /// password and push rules for the @grapevine account. + /// + /// If an error occurs, it is logged. 
+ pub(crate) fn set_emergency_access(&self) { + let inner = || -> Result { + let admin_bot = self.admin_bot_user_id.as_ref(); + + services() + .users + .set_password(admin_bot, self.emergency_password())?; + + let (ruleset, res) = match self.emergency_password() { + Some(_) => (Ruleset::server_default(admin_bot), Ok(true)), + None => (Ruleset::new(), Ok(false)), + }; + + services().account_data.update_global( + admin_bot, + &Raw::new(&PushRulesEventContent { + global: ruleset, + }) + .expect("json serialization should always succeed"), + )?; + + res + }; + + match inner() { + Ok(pwd_set) => { + if pwd_set { + warn!( + "The Grapevine account emergency password is set! \ + Please unset it as soon as you finish admin account \ + recovery!" + ); + services().admin.send_message( + RoomMessageEventContent::text_plain( + "The Grapevine account emergency password is set! \ + Please unset it as soon as you finish admin \ + account recovery!", + ), + ); + } + } + Err(error) => { + error!( + %error, + "Could not set the configured emergency password for the \ + Grapevine user", + ); + } + } + } + + pub(crate) fn supported_room_versions(&self) -> Vec { + self.stable_room_versions.clone() + } + + /// This doesn't actually check that the keys provided are newer than the + /// old set. + pub(crate) fn add_signing_key_from_trusted_server( + &self, + origin: &ServerName, + new_keys: ServerSigningKeys, + ) -> Result { + self.db.add_signing_key_from_trusted_server(origin, new_keys) + } + + /// Same as `from_trusted_server`, except it will move active keys not + /// present in `new_keys` to `old_signing_keys` + pub(crate) fn add_signing_key_from_origin( + &self, + origin: &ServerName, + new_keys: ServerSigningKeys, + ) -> Result { + self.db.add_signing_key_from_origin(origin, new_keys) + } + + /// This returns Ok(None) when there are no keys found for the server. 
+ pub(crate) fn signing_keys_for( + &self, + origin: &ServerName, + ) -> Result> { + if origin == self.server_name() { + Ok(Some(SigningKeys::load_own_keys())) + } else { + self.db.signing_keys_for(origin) + } + } + + /// Filters the key map of multiple servers down to keys that should be + /// accepted given the expiry time, room version, and timestamp of the + /// paramters + #[allow(clippy::unused_self)] + pub(crate) fn filter_keys_server_map( + &self, + keys: BTreeMap, + timestamp: MilliSecondsSinceUnixEpoch, + ) -> BTreeMap> { + keys.into_iter() + .filter_map(|(server, keys)| { + self.filter_keys_single_server(keys, timestamp) + .map(|keys| (server, keys)) + }) + .collect() + } + + /// Filters the keys of a single server down to keys that should be accepted + /// given the expiry time, room version, and timestamp of the paramters + #[allow(clippy::unused_self)] + pub(crate) fn filter_keys_single_server( + &self, + keys: SigningKeys, + timestamp: MilliSecondsSinceUnixEpoch, + ) -> Option> { + let all_valid = keys.valid_until_ts > timestamp; + + all_valid.then(|| { + // Given that either the room version allows stale keys, or the + // valid_until_ts is in the future, all verify_keys are + // valid + let mut map: BTreeMap<_, _> = keys + .verify_keys + .into_iter() + .map(|(id, key)| (id, key.key)) + .collect(); + + map.extend(keys.old_verify_keys.into_iter().filter_map( + |(id, key)| { + // Even on old room versions, we don't allow old keys if + // they are expired + (key.expired_ts > timestamp).then_some((id, key.key)) + }, + )); + + map + }) + } + + pub(crate) fn database_version(&self) -> Result { + self.db.database_version() + } + + pub(crate) fn bump_database_version(&self, new_version: u64) -> Result<()> { + self.db.bump_database_version(new_version) + } + + pub(crate) fn get_media_folder(&self) -> PathBuf { + let MediaBackendConfig::Filesystem(MediaFilesystemConfig { + path, + }) = &self.config.media.backend; + + path.clone() + } + + pub(crate) fn 
get_media_file(&self, key: &MediaFileKey) -> PathBuf { + let mut r = self.get_media_folder(); + r.push(general_purpose::URL_SAFE_NO_PAD.encode(key.as_bytes())); + r + } + + pub(crate) fn shutdown(&self) { + self.shutdown.store(true, atomic::Ordering::Relaxed); + self.rotate.fire(); + } +} + +fn reqwest_client_builder(config: &Config) -> Result { + let mut reqwest_client_builder = reqwest::Client::builder() + .pool_max_idle_per_host(0) + .connect_timeout(Duration::from_secs(30)) + .timeout(Duration::from_secs(60 * 3)) + .danger_accept_invalid_certs( + config.federation.allow_invalid_tls_certificates, + ) + .user_agent(format!("{}/{}", env!("CARGO_PKG_NAME"), crate::version())); + + if let Some(proxy) = config.proxy.to_proxy()? { + reqwest_client_builder = reqwest_client_builder.proxy(proxy); + } + + Ok(reqwest_client_builder) +} diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index 8a66751b..124e81b3 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -1,37 +1,120 @@ -use std::collections::BTreeMap; +use std::{ + collections::BTreeMap, + time::{Duration, SystemTime}, +}; use async_trait::async_trait; use ruma::{ - api::federation::discovery::{ServerSigningKeys, VerifyKey}, + api::federation::discovery::{OldVerifyKey, ServerSigningKeys, VerifyKey}, + serde::Base64, signatures::Ed25519KeyPair, - DeviceId, OwnedServerSigningKeyId, ServerName, UserId, + DeviceId, MilliSecondsSinceUnixEpoch, ServerName, UserId, }; +use serde::Deserialize; -use crate::Result; +use crate::{services, Result}; + +/// Similar to [`ServerSigningKeys`], but drops a few unnecessary fields we +/// don't require post-validation +#[derive(Deserialize, Debug, Clone)] +pub(crate) struct SigningKeys { + // FIXME: Use [`OwnedServerSigningKeyId`] as key + // Not yet feasibly because they get passed to `verify_event`, see https://github.com/ruma/ruma/pull/1808 + pub(crate) verify_keys: BTreeMap, + pub(crate) old_verify_keys: BTreeMap, + + pub(crate) 
valid_until_ts: MilliSecondsSinceUnixEpoch, +} + +impl SigningKeys { + /// Creates the `SigningKeys` struct, using the keys of the current server + pub(crate) fn load_own_keys() -> Self { + let old_verify_keys = services() + .globals + .config + .federation + .old_verify_keys + .iter() + .map(|(id, key)| (id.to_string(), key.clone())) + .collect(); + + let mut keys = Self { + verify_keys: BTreeMap::new(), + old_verify_keys, + valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + Duration::from_secs(7 * 86400), + ) + .expect("Should be valid until year 500,000,000"), + }; + + keys.verify_keys.insert( + format!("ed25519:{}", services().globals.keypair().version()), + VerifyKey { + key: Base64::new( + services().globals.keypair.public_key().to_vec(), + ), + }, + ); + + keys + } +} + +impl From for SigningKeys { + fn from(value: ServerSigningKeys) -> Self { + let ServerSigningKeys { + verify_keys, + old_verify_keys, + valid_until_ts, + .. + } = value; + + Self { + verify_keys: verify_keys + .into_iter() + .map(|(id, key)| (id.to_string(), key)) + .collect(), + old_verify_keys: old_verify_keys + .into_iter() + .map(|(id, key)| (id.to_string(), key)) + .collect(), + valid_until_ts, + } + } +} #[async_trait] -pub trait Data: Send + Sync { +pub(crate) trait Data: Send + Sync { fn next_count(&self) -> Result; fn current_count(&self) -> Result; - fn last_check_for_updates_id(&self) -> Result; - fn update_check_for_updates_id(&self, id: u64) -> Result<()>; - async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()>; + async fn watch(&self, user_id: &UserId, device_id: &DeviceId) + -> Result<()>; fn cleanup(&self) -> Result<()>; - fn memory_usage(&self) -> String; - fn clear_caches(&self, amount: u32); fn load_keypair(&self) -> Result; fn remove_keypair(&self) -> Result<()>; - fn add_signing_key( + /// Only extends the cached keys, not moving any verify_keys to + /// old_verify_keys, as if we suddenly recieve requests from the 
origin + /// server, we want to be able to accept requests from them + fn add_signing_key_from_trusted_server( &self, origin: &ServerName, new_keys: ServerSigningKeys, - ) -> Result>; + ) -> Result; + /// Extends cached keys, as well as moving verify_keys that are not present + /// in these new keys to old_verify_keys, so that potnetially + /// comprimised keys cannot be used to make requests + fn add_signing_key_from_origin( + &self, + origin: &ServerName, + new_keys: ServerSigningKeys, + ) -> Result; - /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. + /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found + /// for the server. fn signing_keys_for( &self, origin: &ServerName, - ) -> Result>; + ) -> Result>; fn database_version(&self) -> Result; fn bump_database_version(&self, new_version: u64) -> Result<()>; } diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs deleted file mode 100644 index 798c725a..00000000 --- a/src/service/globals/mod.rs +++ /dev/null @@ -1,443 +0,0 @@ -mod data; -pub use data::Data; -use ruma::{ - serde::Base64, OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedServerName, - OwnedServerSigningKeyId, OwnedUserId, -}; - -use crate::api::server_server::FedDest; - -use crate::{services, Config, Error, Result}; -use futures_util::FutureExt; -use hyper::{ - client::connect::dns::{GaiResolver, Name}, - service::Service as HyperService, -}; -use reqwest::dns::{Addrs, Resolve, Resolving}; -use ruma::{ - api::{ - client::sync::sync_events, - federation::discovery::{ServerSigningKeys, VerifyKey}, - }, - DeviceId, RoomVersionId, ServerName, UserId, -}; -use std::{ - collections::{BTreeMap, HashMap}, - error::Error as StdError, - fs, - future::{self, Future}, - iter, - net::{IpAddr, SocketAddr}, - path::PathBuf, - sync::{ - atomic::{self, AtomicBool}, - Arc, RwLock as StdRwLock, - }, - time::{Duration, Instant}, -}; -use tokio::sync::{broadcast, watch::Receiver, Mutex, RwLock, 
Semaphore}; -use tracing::{error, info}; -use trust_dns_resolver::TokioAsyncResolver; - -use base64::{engine::general_purpose, Engine as _}; - -type WellKnownMap = HashMap; -type TlsNameMap = HashMap, u16)>; -type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries -type SyncHandle = ( - Option, // since - Receiver>>, // rx -); - -pub struct Service { - pub db: &'static dyn Data, - - pub actual_destination_cache: Arc>, // actual_destination, host - pub tls_name_override: Arc>, - pub config: Config, - keypair: Arc, - dns_resolver: TokioAsyncResolver, - jwt_decoding_key: Option, - federation_client: reqwest::Client, - default_client: reqwest::Client, - pub stable_room_versions: Vec, - pub unstable_room_versions: Vec, - pub bad_event_ratelimiter: Arc>>, - pub bad_signature_ratelimiter: Arc, RateLimitState>>>, - pub bad_query_ratelimiter: Arc>>, - pub servername_ratelimiter: Arc>>>, - pub sync_receivers: RwLock>, - pub roomid_mutex_insert: RwLock>>>, - pub roomid_mutex_state: RwLock>>>, - pub roomid_mutex_federation: RwLock>>>, // this lock will be held longer - pub roomid_federationhandletime: RwLock>, - pub stateres_mutex: Arc>, - pub rotate: RotationHandler, - - pub shutdown: AtomicBool, -} - -/// Handles "rotation" of long-polling requests. "Rotation" in this context is similar to "rotation" of log files and the like. -/// -/// This is utilized to have sync workers return early and release read locks on the database. 
-pub struct RotationHandler(broadcast::Sender<()>, broadcast::Receiver<()>); - -impl RotationHandler { - pub fn new() -> Self { - let (s, r) = broadcast::channel(1); - Self(s, r) - } - - pub fn watch(&self) -> impl Future { - let mut r = self.0.subscribe(); - - async move { - let _ = r.recv().await; - } - } - - pub fn fire(&self) { - let _ = self.0.send(()); - } -} - -impl Default for RotationHandler { - fn default() -> Self { - Self::new() - } -} - -pub struct Resolver { - inner: GaiResolver, - overrides: Arc>, -} - -impl Resolver { - pub fn new(overrides: Arc>) -> Self { - Resolver { - inner: GaiResolver::new(), - overrides, - } - } -} - -impl Resolve for Resolver { - fn resolve(&self, name: Name) -> Resolving { - self.overrides - .read() - .unwrap() - .get(name.as_str()) - .and_then(|(override_name, port)| { - override_name.first().map(|first_name| { - let x: Box + Send> = - Box::new(iter::once(SocketAddr::new(*first_name, *port))); - let x: Resolving = Box::pin(future::ready(Ok(x))); - x - }) - }) - .unwrap_or_else(|| { - let this = &mut self.inner.clone(); - Box::pin(HyperService::::call(this, name).map(|result| { - result - .map(|addrs| -> Addrs { Box::new(addrs) }) - .map_err(|err| -> Box { Box::new(err) }) - })) - }) - } -} - -impl Service { - pub fn load(db: &'static dyn Data, config: Config) -> Result { - let keypair = db.load_keypair(); - - let keypair = match keypair { - Ok(k) => k, - Err(e) => { - error!("Keypair invalid. Deleting..."); - db.remove_keypair()?; - return Err(e); - } - }; - - let tls_name_override = Arc::new(StdRwLock::new(TlsNameMap::new())); - - let jwt_decoding_key = config - .jwt_secret - .as_ref() - .map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes())); - - let default_client = reqwest_client_builder(&config)?.build()?; - let federation_client = reqwest_client_builder(&config)? 
- .dns_resolver(Arc::new(Resolver::new(tls_name_override.clone()))) - .build()?; - - // Supported and stable room versions - let stable_room_versions = vec![ - RoomVersionId::V6, - RoomVersionId::V7, - RoomVersionId::V8, - RoomVersionId::V9, - RoomVersionId::V10, - RoomVersionId::V11, - ]; - // Experimental, partially supported room versions - let unstable_room_versions = vec![RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; - - let mut s = Self { - db, - config, - keypair: Arc::new(keypair), - dns_resolver: TokioAsyncResolver::tokio_from_system_conf().map_err(|e| { - error!( - "Failed to set up trust dns resolver with system config: {}", - e - ); - Error::bad_config("Failed to set up trust dns resolver with system config.") - })?, - actual_destination_cache: Arc::new(RwLock::new(WellKnownMap::new())), - tls_name_override, - federation_client, - default_client, - jwt_decoding_key, - stable_room_versions, - unstable_room_versions, - bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), - bad_signature_ratelimiter: Arc::new(RwLock::new(HashMap::new())), - bad_query_ratelimiter: Arc::new(RwLock::new(HashMap::new())), - servername_ratelimiter: Arc::new(RwLock::new(HashMap::new())), - roomid_mutex_state: RwLock::new(HashMap::new()), - roomid_mutex_insert: RwLock::new(HashMap::new()), - roomid_mutex_federation: RwLock::new(HashMap::new()), - roomid_federationhandletime: RwLock::new(HashMap::new()), - stateres_mutex: Arc::new(Mutex::new(())), - sync_receivers: RwLock::new(HashMap::new()), - rotate: RotationHandler::new(), - shutdown: AtomicBool::new(false), - }; - - fs::create_dir_all(s.get_media_folder())?; - - if !s - .supported_room_versions() - .contains(&s.config.default_room_version) - { - error!(config=?s.config.default_room_version, fallback=?crate::config::default_default_room_version(), "Room version in config isn't supported, falling back to default version"); - s.config.default_room_version = crate::config::default_default_room_version(); - }; 
- - Ok(s) - } - - /// Returns this server's keypair. - pub fn keypair(&self) -> &ruma::signatures::Ed25519KeyPair { - &self.keypair - } - - /// Returns a reqwest client which can be used to send requests - pub fn default_client(&self) -> reqwest::Client { - // Client is cheap to clone (Arc wrapper) and avoids lifetime issues - self.default_client.clone() - } - - /// Returns a client used for resolving .well-knowns - pub fn federation_client(&self) -> reqwest::Client { - // Client is cheap to clone (Arc wrapper) and avoids lifetime issues - self.federation_client.clone() - } - - #[tracing::instrument(skip(self))] - pub fn next_count(&self) -> Result { - self.db.next_count() - } - - #[tracing::instrument(skip(self))] - pub fn current_count(&self) -> Result { - self.db.current_count() - } - - #[tracing::instrument(skip(self))] - pub fn last_check_for_updates_id(&self) -> Result { - self.db.last_check_for_updates_id() - } - - #[tracing::instrument(skip(self))] - pub fn update_check_for_updates_id(&self, id: u64) -> Result<()> { - self.db.update_check_for_updates_id(id) - } - - pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { - self.db.watch(user_id, device_id).await - } - - pub fn cleanup(&self) -> Result<()> { - self.db.cleanup() - } - - pub fn server_name(&self) -> &ServerName { - self.config.server_name.as_ref() - } - - pub fn max_request_size(&self) -> u32 { - self.config.max_request_size - } - - pub fn max_fetch_prev_events(&self) -> u16 { - self.config.max_fetch_prev_events - } - - pub fn allow_registration(&self) -> bool { - self.config.allow_registration - } - - pub fn allow_encryption(&self) -> bool { - self.config.allow_encryption - } - - pub fn allow_federation(&self) -> bool { - self.config.allow_federation - } - - pub fn allow_room_creation(&self) -> bool { - self.config.allow_room_creation - } - - pub fn allow_unstable_room_versions(&self) -> bool { - self.config.allow_unstable_room_versions - } - - pub fn 
default_room_version(&self) -> RoomVersionId { - self.config.default_room_version.clone() - } - - pub fn enable_lightning_bolt(&self) -> bool { - self.config.enable_lightning_bolt - } - - pub fn allow_check_for_updates(&self) -> bool { - self.config.allow_check_for_updates - } - - pub fn trusted_servers(&self) -> &[OwnedServerName] { - &self.config.trusted_servers - } - - pub fn dns_resolver(&self) -> &TokioAsyncResolver { - &self.dns_resolver - } - - pub fn jwt_decoding_key(&self) -> Option<&jsonwebtoken::DecodingKey> { - self.jwt_decoding_key.as_ref() - } - - pub fn turn_password(&self) -> &String { - &self.config.turn_password - } - - pub fn turn_ttl(&self) -> u64 { - self.config.turn_ttl - } - - pub fn turn_uris(&self) -> &[String] { - &self.config.turn_uris - } - - pub fn turn_username(&self) -> &String { - &self.config.turn_username - } - - pub fn turn_secret(&self) -> &String { - &self.config.turn_secret - } - - pub fn emergency_password(&self) -> &Option { - &self.config.emergency_password - } - - pub fn supported_room_versions(&self) -> Vec { - let mut room_versions: Vec = vec![]; - room_versions.extend(self.stable_room_versions.clone()); - if self.allow_unstable_room_versions() { - room_versions.extend(self.unstable_room_versions.clone()); - }; - room_versions - } - - /// TODO: the key valid until timestamp is only honored in room version > 4 - /// Remove the outdated keys and insert the new ones. - /// - /// This doesn't actually check that the keys provided are newer than the old set. - pub fn add_signing_key( - &self, - origin: &ServerName, - new_keys: ServerSigningKeys, - ) -> Result> { - self.db.add_signing_key(origin, new_keys) - } - - /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. 
- pub fn signing_keys_for( - &self, - origin: &ServerName, - ) -> Result> { - let mut keys = self.db.signing_keys_for(origin)?; - if origin == self.server_name() { - keys.insert( - format!("ed25519:{}", services().globals.keypair().version()) - .try_into() - .expect("found invalid server signing keys in DB"), - VerifyKey { - key: Base64::new(self.keypair.public_key().to_vec()), - }, - ); - } - - Ok(keys) - } - - pub fn database_version(&self) -> Result { - self.db.database_version() - } - - pub fn bump_database_version(&self, new_version: u64) -> Result<()> { - self.db.bump_database_version(new_version) - } - - pub fn get_media_folder(&self) -> PathBuf { - let mut r = PathBuf::new(); - r.push(self.config.database_path.clone()); - r.push("media"); - r - } - - pub fn get_media_file(&self, key: &[u8]) -> PathBuf { - let mut r = PathBuf::new(); - r.push(self.config.database_path.clone()); - r.push("media"); - r.push(general_purpose::URL_SAFE_NO_PAD.encode(key)); - r - } - - pub fn well_known_client(&self) -> &Option { - &self.config.well_known_client - } - - pub fn shutdown(&self) { - self.shutdown.store(true, atomic::Ordering::Relaxed); - // On shutdown - info!(target: "shutdown-sync", "Received shutdown notification, notifying sync helpers..."); - services().globals.rotate.fire(); - } -} - -fn reqwest_client_builder(config: &Config) -> Result { - let mut reqwest_client_builder = reqwest::Client::builder() - .pool_max_idle_per_host(0) - .connect_timeout(Duration::from_secs(30)) - .timeout(Duration::from_secs(60 * 3)); - - if let Some(proxy) = config.proxy.to_proxy()? 
{ - reqwest_client_builder = reqwest_client_builder.proxy(proxy); - } - - Ok(reqwest_client_builder) -} diff --git a/src/service/key_backups.rs b/src/service/key_backups.rs new file mode 100644 index 00000000..4f4b4344 --- /dev/null +++ b/src/service/key_backups.rs @@ -0,0 +1,5 @@ +mod data; + +pub(crate) use data::Data; + +pub(crate) type Service = &'static dyn Data; diff --git a/src/service/key_backups/data.rs b/src/service/key_backups/data.rs index bf640015..0b667192 100644 --- a/src/service/key_backups/data.rs +++ b/src/service/key_backups/data.rs @@ -1,13 +1,14 @@ use std::collections::BTreeMap; -use crate::Result; use ruma::{ api::client::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, serde::Raw, OwnedRoomId, RoomId, UserId, }; -pub trait Data: Send + Sync { +use crate::Result; + +pub(crate) trait Data: Send + Sync { fn create_backup( &self, user_id: &UserId, @@ -23,12 +24,21 @@ pub trait Data: Send + Sync { backup_metadata: &Raw, ) -> Result; - fn get_latest_backup_version(&self, user_id: &UserId) -> Result>; + fn get_latest_backup_version( + &self, + user_id: &UserId, + ) -> Result>; - fn get_latest_backup(&self, user_id: &UserId) - -> Result)>>; + fn get_latest_backup( + &self, + user_id: &UserId, + ) -> Result)>>; - fn get_backup(&self, user_id: &UserId, version: &str) -> Result>>; + fn get_backup( + &self, + user_id: &UserId, + version: &str, + ) -> Result>>; fn add_key( &self, @@ -66,7 +76,12 @@ pub trait Data: Send + Sync { fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()>; - fn delete_room_keys(&self, user_id: &UserId, version: &str, room_id: &RoomId) -> Result<()>; + fn delete_room_keys( + &self, + user_id: &UserId, + version: &str, + room_id: &RoomId, + ) -> Result<()>; fn delete_room_key( &self, diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs deleted file mode 100644 index 5fc52ced..00000000 --- a/src/service/key_backups/mod.rs +++ /dev/null @@ -1,127 +0,0 @@ -mod data; -pub use 
data::Data; - -use crate::Result; -use ruma::{ - api::client::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, - serde::Raw, - OwnedRoomId, RoomId, UserId, -}; -use std::collections::BTreeMap; - -pub struct Service { - pub db: &'static dyn Data, -} - -impl Service { - pub fn create_backup( - &self, - user_id: &UserId, - backup_metadata: &Raw, - ) -> Result { - self.db.create_backup(user_id, backup_metadata) - } - - pub fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> { - self.db.delete_backup(user_id, version) - } - - pub fn update_backup( - &self, - user_id: &UserId, - version: &str, - backup_metadata: &Raw, - ) -> Result { - self.db.update_backup(user_id, version, backup_metadata) - } - - pub fn get_latest_backup_version(&self, user_id: &UserId) -> Result> { - self.db.get_latest_backup_version(user_id) - } - - pub fn get_latest_backup( - &self, - user_id: &UserId, - ) -> Result)>> { - self.db.get_latest_backup(user_id) - } - - pub fn get_backup( - &self, - user_id: &UserId, - version: &str, - ) -> Result>> { - self.db.get_backup(user_id, version) - } - - pub fn add_key( - &self, - user_id: &UserId, - version: &str, - room_id: &RoomId, - session_id: &str, - key_data: &Raw, - ) -> Result<()> { - self.db - .add_key(user_id, version, room_id, session_id, key_data) - } - - pub fn count_keys(&self, user_id: &UserId, version: &str) -> Result { - self.db.count_keys(user_id, version) - } - - pub fn get_etag(&self, user_id: &UserId, version: &str) -> Result { - self.db.get_etag(user_id, version) - } - - pub fn get_all( - &self, - user_id: &UserId, - version: &str, - ) -> Result> { - self.db.get_all(user_id, version) - } - - pub fn get_room( - &self, - user_id: &UserId, - version: &str, - room_id: &RoomId, - ) -> Result>> { - self.db.get_room(user_id, version, room_id) - } - - pub fn get_session( - &self, - user_id: &UserId, - version: &str, - room_id: &RoomId, - session_id: &str, - ) -> Result>> { - self.db.get_session(user_id, version, 
room_id, session_id) - } - - pub fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> { - self.db.delete_all_keys(user_id, version) - } - - pub fn delete_room_keys( - &self, - user_id: &UserId, - version: &str, - room_id: &RoomId, - ) -> Result<()> { - self.db.delete_room_keys(user_id, version, room_id) - } - - pub fn delete_room_key( - &self, - user_id: &UserId, - version: &str, - room_id: &RoomId, - session_id: &str, - ) -> Result<()> { - self.db - .delete_room_key(user_id, version, room_id, session_id) - } -} diff --git a/src/service/media.rs b/src/service/media.rs new file mode 100644 index 00000000..9b965caa --- /dev/null +++ b/src/service/media.rs @@ -0,0 +1,405 @@ +use std::io::Cursor; + +use image::imageops::FilterType; +use ruma::{ + api::client::error::ErrorKind, http_headers::ContentDisposition, + OwnedMxcUri, +}; +use tokio::{ + fs::{self, File}, + io::{AsyncReadExt, AsyncWriteExt}, +}; +use tracing::{debug, warn}; + +use crate::{services, utils, Error, Result}; + +mod data; + +pub(crate) use data::Data; + +#[derive(Debug, Eq, PartialEq)] +pub(crate) struct FileMeta { + // This gets written to the database but we no longer read it + // + // TODO: Write a database migration to get rid of this and instead store + // only the filename instead of the entire `Content-Disposition` header. + #[allow(dead_code)] + pub(crate) content_disposition: Option, + pub(crate) content_type: Option, +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub(crate) struct MediaFileKey(Vec); + +impl MediaFileKey { + pub(crate) fn new(key: Vec) -> Self { + Self(key) + } + + pub(crate) fn as_bytes(&self) -> &[u8] { + &self.0 + } +} + +pub(crate) struct Service { + pub(crate) db: &'static dyn Data, +} + +impl Service { + /// Uploads a file. 
+ #[tracing::instrument(skip(self, file))] + pub(crate) async fn create( + &self, + mxc: OwnedMxcUri, + content_disposition: Option<&ContentDisposition>, + content_type: Option, + file: &[u8], + ) -> Result { + let meta = FileMeta { + content_disposition: content_disposition + .map(ContentDisposition::to_string), + content_type, + }; + // Width, Height = 0 if it's not a thumbnail + let key = self.db.create_file_metadata(mxc, 0, 0, &meta)?; + + self.write_content(&key, file).await?; + Ok(meta) + } + + /// Uploads or replaces a file thumbnail. + #[tracing::instrument(skip(self, file))] + pub(crate) async fn upload_thumbnail( + &self, + mxc: OwnedMxcUri, + content_disposition: Option, + content_type: Option, + width: u32, + height: u32, + file: &[u8], + ) -> Result { + let meta = FileMeta { + content_disposition, + content_type, + }; + let key = self.db.create_file_metadata(mxc, width, height, &meta)?; + + self.write_content(&key, file).await?; + Ok(meta) + } + + /// Downloads a file. + #[tracing::instrument(skip(self))] + pub(crate) async fn get( + &self, + mxc: OwnedMxcUri, + ) -> Result)>> { + if let Some((meta, key)) = self.db.search_file_metadata(mxc, 0, 0)? { + Ok(self.read_content(&key).await?.map(|data| (meta, data))) + } else { + Ok(None) + } + } + + /// Deletes a media object and all associated thumbnails. + #[tracing::instrument(skip(self))] + pub(crate) async fn delete(&self, mxc: OwnedMxcUri) -> Result<()> { + let mut any_files = false; + + let thumbnails = self.db.search_thumbnails_metadata(mxc.clone())?; + for (_, thumbnail_key) in thumbnails { + any_files = true; + self.delete_by_key(thumbnail_key.clone()).await.inspect_err( + |error| { + warn!( + thumbnail_key = utils::u8_slice_to_hex( + thumbnail_key.as_bytes() + ), + %error, + "Failed to delete thumbnail media" + ); + }, + )?; + } + + if let Some((_, key)) = + self.db.search_file_metadata(mxc, 0, 0).inspect_err( + |error| warn!(%error, "Failed to find original media key"), + )? 
+ { + any_files = true; + self.delete_by_key(key).await.inspect_err( + |error| warn!(%error, "Failed to delete original media"), + )?; + } + + if any_files { + Ok(()) + } else { + let error = + Error::BadRequest(ErrorKind::NotFound, "Media not found"); + warn!(%error, "Failed to delete media"); + Err(error) + } + } + + /// Deletes a specific media key, which may or may not be a thumbnail. + /// + /// When deleting a non-thumbnail key with this method, the associated + /// thumbnails are not deleted. + async fn delete_by_key(&self, key: MediaFileKey) -> Result<()> { + let path = services().globals.get_media_file(&key); + match fs::remove_file(path).await { + Ok(()) => (), + // The file in the fs may already have been deleted by hand + Err(e) if e.kind() == std::io::ErrorKind::NotFound => (), + // The file may have never existed in the fs because the name was + // too long + #[cfg(unix)] + Err(e) if e.raw_os_error() == Some(nix::libc::ENAMETOOLONG) => (), + other_error => other_error?, + } + self.db.delete_file_metadata(key)?; + Ok(()) + } + + /// List all media stored in the database. + /// + /// Each MXC is listed once. Thumbnails are not included separately from the + /// original media. + #[tracing::instrument(skip(self))] + pub(crate) fn iter_all(&self) -> impl Iterator> { + let mut prev_mxc = None; + self.db + .all_file_metadata() + .map(|media| media.map(|(mxc, ..)| mxc)) + .filter(move |mxc| { + if let Ok(mxc) = mxc { + // Skip mxcs that we have already seen. All files associated + // with a given mxc should appear consecutively in the db + // iterator, so we only need to check against the previous + // value. + if prev_mxc.as_ref() == Some(mxc) { + return false; + } + prev_mxc = Some(mxc.clone()); + } + true + }) + } + + /// Returns width, height of the thumbnail and whether it should be cropped. + /// Returns None when the server should send the original file. 
+ fn thumbnail_properties( + width: u32, + height: u32, + ) -> Option<(u32, u32, bool)> { + match (width, height) { + (0..=32, 0..=32) => Some((32, 32, true)), + (0..=96, 0..=96) => Some((96, 96, true)), + (0..=320, 0..=240) => Some((320, 240, false)), + (0..=640, 0..=480) => Some((640, 480, false)), + (0..=800, 0..=600) => Some((800, 600, false)), + _ => None, + } + } + + /// Generates a thumbnail from the given image file contents. Returns + /// `Ok(None)` if the input image should be used as-is. + #[tracing::instrument( + skip(file), + fields(input_size = file.len(), original_width, original_height), + )] + fn generate_thumbnail( + file: &[u8], + width: u32, + height: u32, + crop: bool, + ) -> Result>> { + let image = match image::load_from_memory(file) { + Ok(image) => image, + Err(error) => { + warn!(%error, "Failed to parse source image"); + return Ok(None); + } + }; + + let original_width = image.width(); + let original_height = image.height(); + tracing::Span::current().record("original_width", original_width); + tracing::Span::current().record("original_height", original_height); + + if width > original_width || height > original_height { + debug!("Requested thumbnail is larger than source image"); + return Ok(None); + } + + let thumbnail = if crop { + image.resize_to_fill(width, height, FilterType::CatmullRom) + } else { + let (exact_width, exact_height) = { + // Copied from image::dynimage::resize_dimensions + let use_width = (u64::from(width) * u64::from(original_height)) + <= (u64::from(original_width) * u64::from(height)); + let intermediate = if use_width { + u64::from(original_height) * u64::from(width) + / u64::from(original_width) + } else { + u64::from(original_width) * u64::from(height) + / u64::from(original_height) + }; + if use_width { + if let Ok(intermediate) = u32::try_from(intermediate) { + (width, intermediate) + } else { + ( + (u64::from(width) * u64::from(u32::MAX) + / intermediate) + .try_into() + .unwrap_or(u32::MAX), + u32::MAX, + ) 
+ } + } else if let Ok(intermediate) = u32::try_from(intermediate) { + (intermediate, height) + } else { + ( + u32::MAX, + (u64::from(height) * u64::from(u32::MAX) + / intermediate) + .try_into() + .unwrap_or(u32::MAX), + ) + } + }; + + image.thumbnail_exact(exact_width, exact_height) + }; + + debug!("Serializing thumbnail as PNG"); + let mut thumbnail_bytes = Vec::new(); + thumbnail.write_to( + &mut Cursor::new(&mut thumbnail_bytes), + image::ImageFormat::Png, + )?; + + Ok(Some(thumbnail_bytes)) + } + + /// Downloads a file's thumbnail. + /// + /// Here's an example on how it works: + /// + /// - Client requests an image with width=567, height=567 + /// - Server rounds that up to (800, 600), so it doesn't have to save too + /// many thumbnails + /// - Server rounds that up again to (958, 600) to fix the aspect ratio + /// (only for width,height>96) + /// - Server creates the thumbnail and sends it to the user + /// + /// For width,height <= 96 the server uses another thumbnailing algorithm + /// which crops the image afterwards. + #[tracing::instrument(skip(self))] + pub(crate) async fn get_thumbnail( + &self, + mxc: OwnedMxcUri, + width: u32, + height: u32, + ) -> Result)>> { + // 0, 0 because that's the original file + let (width, height, crop) = + Self::thumbnail_properties(width, height).unwrap_or((0, 0, false)); + + if let Some((meta, key)) = + self.db.search_file_metadata(mxc.clone(), width, height)? + { + debug!("Using saved thumbnail"); + return Ok(self.read_content(&key).await?.map(|file| (meta, file))); + } + + let Some((meta, key)) = + self.db.search_file_metadata(mxc.clone(), 0, 0)? + else { + debug!("Original image not found, can't generate thumbnail"); + return Ok(None); + }; + + let Some(file) = self.read_content(&key).await? 
else { + debug!("Original image not found, can't generate thumbnail"); + return Ok(None); + }; + + debug!("Generating thumbnail"); + let thumbnail_result = { + let file = file.clone(); + let outer_span = tracing::span::Span::current(); + + tokio::task::spawn_blocking(move || { + outer_span.in_scope(|| { + Self::generate_thumbnail(&file, width, height, crop) + }) + }) + .await + .expect("failed to join thumbnailer task") + }; + + let Some(thumbnail_bytes) = thumbnail_result? else { + debug!("Returning source image as-is"); + return Ok(Some((meta, file))); + }; + + debug!("Saving created thumbnail"); + + // Save thumbnail in database so we don't have to generate it + // again next time + let thumbnail_key = + self.db.create_file_metadata(mxc, width, height, &meta)?; + self.write_content(&thumbnail_key, &thumbnail_bytes).await?; + + Ok(Some((meta, thumbnail_bytes.clone()))) + } + + /// Writes contents for a media file to the fs. + /// + /// If a file already existed with the specified key, it is replaced. + async fn write_content( + &self, + key: &MediaFileKey, + data: &[u8], + ) -> Result<()> { + let path = services().globals.get_media_file(key); + let mut file = File::create(path).await?; + + file.write_all(data).await?; + + Ok(()) + } + + /// Returns the contents of a media file, read from the fs. + /// + /// If the file cannot be opened, returns `Ok(None)`. This is needed because + /// before media deletion admin commands were implemented, the only way to + /// delete abuse media was to remove the associated files from the fs. This + /// leaves the db in an inconsistent state, where media keys exist in the db + /// but their content files do not. We want to return `M_NOT_YET_UPLOADED` + /// in this case rather than the `M_UNKNOWN` we would normally use for db + /// consistency problems. 
+ async fn read_content( + &self, + key: &MediaFileKey, + ) -> Result>> { + let path = services().globals.get_media_file(key); + let mut file = match File::open(path).await { + Ok(file) => file, + Err(e) if e.kind() == std::io::ErrorKind::NotFound => { + return Ok(None) + } + Err(e) => return Err(e.into()), + }; + + let mut data = Vec::new(); + file.read_to_end(&mut data).await?; + + Ok(Some(data)) + } +} diff --git a/src/service/media/data.rs b/src/service/media/data.rs index 75a682cb..c3cff8cb 100644 --- a/src/service/media/data.rs +++ b/src/service/media/data.rs @@ -1,20 +1,39 @@ +use ruma::OwnedMxcUri; + +use super::{FileMeta, MediaFileKey}; use crate::Result; -pub trait Data: Send + Sync { +pub(crate) trait Data: Send + Sync { fn create_file_metadata( &self, - mxc: String, + mxc: OwnedMxcUri, width: u32, height: u32, - content_disposition: Option<&str>, - content_type: Option<&str>, - ) -> Result>; + meta: &FileMeta, + ) -> Result; - /// Returns content_disposition, content_type and the metadata key. fn search_file_metadata( &self, - mxc: String, + mxc: OwnedMxcUri, width: u32, height: u32, - ) -> Result<(Option, Option, Vec)>; + ) -> Result>; + + fn delete_file_metadata(&self, key: MediaFileKey) -> Result<()>; + + /// Return all thumbnail keys/metadata associated with a MXC. + /// + /// The original file is not returned. To fetch the key/metadata of the + /// original file, use [`Data::search_file_metadata`]. + fn search_thumbnails_metadata( + &self, + mxc: OwnedMxcUri, + ) -> Result>; + + /// Returns an iterator over metadata for all media, including thumbnails. 
+ fn all_file_metadata( + &self, + ) -> Box< + dyn Iterator> + '_, + >; } diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs deleted file mode 100644 index fc8fa569..00000000 --- a/src/service/media/mod.rs +++ /dev/null @@ -1,228 +0,0 @@ -mod data; -use std::io::Cursor; - -pub use data::Data; - -use crate::{services, Result}; -use image::imageops::FilterType; - -use tokio::{ - fs::File, - io::{AsyncReadExt, AsyncWriteExt, BufReader}, -}; - -pub struct FileMeta { - pub content_disposition: Option, - pub content_type: Option, - pub file: Vec, -} - -pub struct Service { - pub db: &'static dyn Data, -} - -impl Service { - /// Uploads a file. - pub async fn create( - &self, - mxc: String, - content_disposition: Option<&str>, - content_type: Option<&str>, - file: &[u8], - ) -> Result<()> { - // Width, Height = 0 if it's not a thumbnail - let key = self - .db - .create_file_metadata(mxc, 0, 0, content_disposition, content_type)?; - - let path = services().globals.get_media_file(&key); - let mut f = File::create(path).await?; - f.write_all(file).await?; - Ok(()) - } - - /// Uploads or replaces a file thumbnail. - #[allow(clippy::too_many_arguments)] - pub async fn upload_thumbnail( - &self, - mxc: String, - content_disposition: Option<&str>, - content_type: Option<&str>, - width: u32, - height: u32, - file: &[u8], - ) -> Result<()> { - let key = - self.db - .create_file_metadata(mxc, width, height, content_disposition, content_type)?; - - let path = services().globals.get_media_file(&key); - let mut f = File::create(path).await?; - f.write_all(file).await?; - - Ok(()) - } - - /// Downloads a file. - pub async fn get(&self, mxc: String) -> Result> { - if let Ok((content_disposition, content_type, key)) = - self.db.search_file_metadata(mxc, 0, 0) - { - let path = services().globals.get_media_file(&key); - let mut file = Vec::new(); - BufReader::new(File::open(path).await?) 
- .read_to_end(&mut file) - .await?; - - Ok(Some(FileMeta { - content_disposition, - content_type, - file, - })) - } else { - Ok(None) - } - } - - /// Returns width, height of the thumbnail and whether it should be cropped. Returns None when - /// the server should send the original file. - pub fn thumbnail_properties(&self, width: u32, height: u32) -> Option<(u32, u32, bool)> { - match (width, height) { - (0..=32, 0..=32) => Some((32, 32, true)), - (0..=96, 0..=96) => Some((96, 96, true)), - (0..=320, 0..=240) => Some((320, 240, false)), - (0..=640, 0..=480) => Some((640, 480, false)), - (0..=800, 0..=600) => Some((800, 600, false)), - _ => None, - } - } - - /// Downloads a file's thumbnail. - /// - /// Here's an example on how it works: - /// - /// - Client requests an image with width=567, height=567 - /// - Server rounds that up to (800, 600), so it doesn't have to save too many thumbnails - /// - Server rounds that up again to (958, 600) to fix the aspect ratio (only for width,height>96) - /// - Server creates the thumbnail and sends it to the user - /// - /// For width,height <= 96 the server uses another thumbnailing algorithm which crops the image afterwards. 
- pub async fn get_thumbnail( - &self, - mxc: String, - width: u32, - height: u32, - ) -> Result> { - let (width, height, crop) = self - .thumbnail_properties(width, height) - .unwrap_or((0, 0, false)); // 0, 0 because that's the original file - - if let Ok((content_disposition, content_type, key)) = - self.db.search_file_metadata(mxc.clone(), width, height) - { - // Using saved thumbnail - let path = services().globals.get_media_file(&key); - let mut file = Vec::new(); - File::open(path).await?.read_to_end(&mut file).await?; - - Ok(Some(FileMeta { - content_disposition, - content_type, - file: file.to_vec(), - })) - } else if let Ok((content_disposition, content_type, key)) = - self.db.search_file_metadata(mxc.clone(), 0, 0) - { - // Generate a thumbnail - let path = services().globals.get_media_file(&key); - let mut file = Vec::new(); - File::open(path).await?.read_to_end(&mut file).await?; - - if let Ok(image) = image::load_from_memory(&file) { - let original_width = image.width(); - let original_height = image.height(); - if width > original_width || height > original_height { - return Ok(Some(FileMeta { - content_disposition, - content_type, - file: file.to_vec(), - })); - } - - let thumbnail = if crop { - image.resize_to_fill(width, height, FilterType::CatmullRom) - } else { - let (exact_width, exact_height) = { - // Copied from image::dynimage::resize_dimensions - let ratio = u64::from(original_width) * u64::from(height); - let nratio = u64::from(width) * u64::from(original_height); - - let use_width = nratio <= ratio; - let intermediate = if use_width { - u64::from(original_height) * u64::from(width) - / u64::from(original_width) - } else { - u64::from(original_width) * u64::from(height) - / u64::from(original_height) - }; - if use_width { - if intermediate <= u64::from(::std::u32::MAX) { - (width, intermediate as u32) - } else { - ( - (u64::from(width) * u64::from(::std::u32::MAX) / intermediate) - as u32, - ::std::u32::MAX, - ) - } - } else if 
intermediate <= u64::from(::std::u32::MAX) { - (intermediate as u32, height) - } else { - ( - ::std::u32::MAX, - (u64::from(height) * u64::from(::std::u32::MAX) / intermediate) - as u32, - ) - } - }; - - image.thumbnail_exact(exact_width, exact_height) - }; - - let mut thumbnail_bytes = Vec::new(); - thumbnail.write_to( - &mut Cursor::new(&mut thumbnail_bytes), - image::ImageOutputFormat::Png, - )?; - - // Save thumbnail in database so we don't have to generate it again next time - let thumbnail_key = self.db.create_file_metadata( - mxc, - width, - height, - content_disposition.as_deref(), - content_type.as_deref(), - )?; - - let path = services().globals.get_media_file(&thumbnail_key); - let mut f = File::create(path).await?; - f.write_all(&thumbnail_bytes).await?; - - Ok(Some(FileMeta { - content_disposition, - content_type, - file: thumbnail_bytes.to_vec(), - })) - } else { - // Couldn't parse file to generate thumbnail, send original - Ok(Some(FileMeta { - content_disposition, - content_type, - file: file.to_vec(), - })) - } - } else { - Ok(None) - } - } -} diff --git a/src/service/mod.rs b/src/service/mod.rs deleted file mode 100644 index 4c11bc18..00000000 --- a/src/service/mod.rs +++ /dev/null @@ -1,220 +0,0 @@ -use std::{ - collections::{BTreeMap, HashMap}, - sync::{Arc, Mutex as StdMutex}, -}; - -use lru_cache::LruCache; -use tokio::sync::{broadcast, Mutex}; - -use crate::{Config, Result}; -use tokio::sync::RwLock; - -pub mod account_data; -pub mod admin; -pub mod appservice; -pub mod globals; -pub mod key_backups; -pub mod media; -pub mod pdu; -pub mod pusher; -pub mod rooms; -pub mod sending; -pub mod transaction_ids; -pub mod uiaa; -pub mod users; - -pub struct Services { - pub appservice: appservice::Service, - pub pusher: pusher::Service, - pub rooms: rooms::Service, - pub transaction_ids: transaction_ids::Service, - pub uiaa: uiaa::Service, - pub users: users::Service, - pub account_data: account_data::Service, - pub admin: Arc, - pub globals: 
globals::Service, - pub key_backups: key_backups::Service, - pub media: media::Service, - pub sending: Arc, -} - -impl Services { - pub fn build< - D: appservice::Data - + pusher::Data - + rooms::Data - + transaction_ids::Data - + uiaa::Data - + users::Data - + account_data::Data - + globals::Data - + key_backups::Data - + media::Data - + sending::Data - + 'static, - >( - db: &'static D, - config: Config, - ) -> Result { - Ok(Self { - appservice: appservice::Service::build(db)?, - pusher: pusher::Service { db }, - rooms: rooms::Service { - alias: rooms::alias::Service { db }, - auth_chain: rooms::auth_chain::Service { db }, - directory: rooms::directory::Service { db }, - edus: rooms::edus::Service { - presence: rooms::edus::presence::Service { db }, - read_receipt: rooms::edus::read_receipt::Service { db }, - typing: rooms::edus::typing::Service { - typing: RwLock::new(BTreeMap::new()), - last_typing_update: RwLock::new(BTreeMap::new()), - typing_update_sender: broadcast::channel(100).0, - }, - }, - event_handler: rooms::event_handler::Service, - lazy_loading: rooms::lazy_loading::Service { - db, - lazy_load_waiting: Mutex::new(HashMap::new()), - }, - metadata: rooms::metadata::Service { db }, - outlier: rooms::outlier::Service { db }, - pdu_metadata: rooms::pdu_metadata::Service { db }, - search: rooms::search::Service { db }, - short: rooms::short::Service { db }, - state: rooms::state::Service { db }, - state_accessor: rooms::state_accessor::Service { - db, - server_visibility_cache: StdMutex::new(LruCache::new( - (100.0 * config.conduit_cache_capacity_modifier) as usize, - )), - user_visibility_cache: StdMutex::new(LruCache::new( - (100.0 * config.conduit_cache_capacity_modifier) as usize, - )), - }, - state_cache: rooms::state_cache::Service { db }, - state_compressor: rooms::state_compressor::Service { - db, - stateinfo_cache: StdMutex::new(LruCache::new( - (100.0 * config.conduit_cache_capacity_modifier) as usize, - )), - }, - timeline: 
rooms::timeline::Service { - db, - lasttimelinecount_cache: Mutex::new(HashMap::new()), - }, - threads: rooms::threads::Service { db }, - spaces: rooms::spaces::Service { - roomid_spacechunk_cache: Mutex::new(LruCache::new(200)), - }, - user: rooms::user::Service { db }, - }, - transaction_ids: transaction_ids::Service { db }, - uiaa: uiaa::Service { db }, - users: users::Service { - db, - connections: StdMutex::new(BTreeMap::new()), - }, - account_data: account_data::Service { db }, - admin: admin::Service::build(), - key_backups: key_backups::Service { db }, - media: media::Service { db }, - sending: sending::Service::build(db, &config), - - globals: globals::Service::load(db, config)?, - }) - } - async fn memory_usage(&self) -> String { - let lazy_load_waiting = self.rooms.lazy_loading.lazy_load_waiting.lock().await.len(); - let server_visibility_cache = self - .rooms - .state_accessor - .server_visibility_cache - .lock() - .unwrap() - .len(); - let user_visibility_cache = self - .rooms - .state_accessor - .user_visibility_cache - .lock() - .unwrap() - .len(); - let stateinfo_cache = self - .rooms - .state_compressor - .stateinfo_cache - .lock() - .unwrap() - .len(); - let lasttimelinecount_cache = self - .rooms - .timeline - .lasttimelinecount_cache - .lock() - .await - .len(); - let roomid_spacechunk_cache = self.rooms.spaces.roomid_spacechunk_cache.lock().await.len(); - - format!( - "\ -lazy_load_waiting: {lazy_load_waiting} -server_visibility_cache: {server_visibility_cache} -user_visibility_cache: {user_visibility_cache} -stateinfo_cache: {stateinfo_cache} -lasttimelinecount_cache: {lasttimelinecount_cache} -roomid_spacechunk_cache: {roomid_spacechunk_cache}\ - " - ) - } - async fn clear_caches(&self, amount: u32) { - if amount > 0 { - self.rooms - .lazy_loading - .lazy_load_waiting - .lock() - .await - .clear(); - } - if amount > 1 { - self.rooms - .state_accessor - .server_visibility_cache - .lock() - .unwrap() - .clear(); - } - if amount > 2 { - 
self.rooms - .state_accessor - .user_visibility_cache - .lock() - .unwrap() - .clear(); - } - if amount > 3 { - self.rooms - .state_compressor - .stateinfo_cache - .lock() - .unwrap() - .clear(); - } - if amount > 4 { - self.rooms - .timeline - .lasttimelinecount_cache - .lock() - .await - .clear(); - } - if amount > 5 { - self.rooms - .spaces - .roomid_spacechunk_cache - .lock() - .await - .clear(); - } - } -} diff --git a/src/service/pdu.rs b/src/service/pdu.rs index a51d7ec5..59e870d3 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -1,67 +1,78 @@ -use crate::Error; +use std::{borrow::Cow, cmp::Ordering, collections::BTreeMap, sync::Arc}; + use ruma::{ canonical_json::redact_content_in_place, events::{ - room::{member::RoomMemberEventContent, redaction::RoomRedactionEventContent}, - space::child::HierarchySpaceChildEvent, - AnyEphemeralRoomEvent, AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, - AnySyncStateEvent, AnySyncTimelineEvent, AnyTimelineEvent, StateEvent, TimelineEventType, + room::member::RoomMemberEventContent, + space::child::HierarchySpaceChildEvent, AnyMessageLikeEvent, + AnyStateEvent, AnyStrippedStateEvent, AnySyncStateEvent, + AnySyncTimelineEvent, AnyTimelineEvent, StateEvent, TimelineEventType, }, + room_version_rules::{RedactionRules, RoomVersionRules}, serde::Raw, - state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, - OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, RoomVersionId, UInt, UserId, + state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, + MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, + UInt, UserId, }; use serde::{Deserialize, Serialize}; use serde_json::{ json, value::{to_raw_value, RawValue as RawJsonValue}, }; -use std::{cmp::Ordering, collections::BTreeMap, sync::Arc}; use tracing::warn; +use crate::{utils::room_version::RoomVersion, Error}; + /// Content hashes of a PDU. 
#[derive(Clone, Debug, Deserialize, Serialize)] -pub struct EventHash { +pub(crate) struct EventHash { /// The SHA-256 hash. - pub sha256: String, + pub(crate) sha256: String, } #[derive(Clone, Deserialize, Debug, Serialize)] -pub struct PduEvent { - pub event_id: Arc, - pub room_id: OwnedRoomId, - pub sender: OwnedUserId, - pub origin_server_ts: UInt, +pub(crate) struct PduEvent { + pub(crate) event_id: Arc, + pub(crate) room_id: OwnedRoomId, + pub(crate) sender: OwnedUserId, + pub(crate) origin_server_ts: UInt, #[serde(rename = "type")] - pub kind: TimelineEventType, - pub content: Box, + pub(crate) kind: TimelineEventType, + pub(crate) content: Box, #[serde(skip_serializing_if = "Option::is_none")] - pub state_key: Option, - pub prev_events: Vec>, - pub depth: UInt, - pub auth_events: Vec>, + pub(crate) state_key: Option, + pub(crate) prev_events: Vec>, + pub(crate) depth: UInt, + pub(crate) auth_events: Vec>, #[serde(skip_serializing_if = "Option::is_none")] - pub redacts: Option>, + pub(crate) redacts: Option>, #[serde(default, skip_serializing_if = "Option::is_none")] - pub unsigned: Option>, - pub hashes: EventHash, + pub(crate) unsigned: Option>, + pub(crate) hashes: EventHash, + + // The schema of this `RawJsonValue` is `BTreeMap, + // BTreeMap>` #[serde(default, skip_serializing_if = "Option::is_none")] - pub signatures: Option>, // BTreeMap, BTreeMap> + pub(crate) signatures: Option>, } impl PduEvent { #[tracing::instrument(skip(self))] - pub fn redact( + pub(crate) fn redact( &mut self, - room_version_id: RoomVersionId, + rules: &RedactionRules, reason: &PduEvent, ) -> crate::Result<()> { self.unsigned = None; - let mut content = serde_json::from_str(self.content.get()) - .map_err(|_| Error::bad_database("PDU in db has invalid content."))?; - redact_content_in_place(&mut content, &room_version_id, self.kind.to_string()) - .map_err(|e| Error::RedactionError(self.sender.server_name().to_owned(), e))?; + let mut content = + 
serde_json::from_str(self.content.get()).map_err(|_| { + Error::bad_database("PDU in db has invalid content.") + })?; + redact_content_in_place(&mut content, rules, self.kind.to_string()) + .map_err(|e| { + Error::Redaction(self.sender.server_name().to_owned(), e) + })?; self.unsigned = Some(to_raw_value(&json!({ "redacted_because": serde_json::to_value(reason).expect("to_value(PduEvent) always works") @@ -72,65 +83,148 @@ impl PduEvent { Ok(()) } - pub fn remove_transaction_id(&mut self) -> crate::Result<()> { + pub(crate) fn is_redacted(&self) -> bool { + #[derive(Deserialize)] + struct ExtractRedactedBecause { + redacted_because: Option, + } + + let Some(unsigned) = &self.unsigned else { + return false; + }; + + let Ok(unsigned) = ExtractRedactedBecause::deserialize(&**unsigned) + else { + return false; + }; + + unsigned.redacted_because.is_some() + } + + /// Returns the redaction target of an event, if present. + /// + /// For versions =v11, it is part of the room content. + // Allowed because we don't use state_res::Event anywhere in grapevine, it's + // just implemented so we can pass Pdu to ruma stateres. 
+ #[expect(clippy::same_name_method)] + pub(crate) fn redacts( + &self, + room_version: &RoomVersion, + ) -> Option> { + if room_version.redaction_event_redacts_in_content { + #[derive(Deserialize)] + struct ExtractRedacts<'a> { + #[serde(borrow)] + redacts: Option>, + } + let extract = + serde_json::from_str::>(self.content.get()) + .map_err(|_| { + Error::bad_database("Invalid content in redaction pdu.") + }) + .ok()?; + extract.redacts + } else { + self.redacts.as_ref().map(|e| Cow::Borrowed(&**e)) + } + } + + pub(crate) fn remove_transaction_id(&mut self) -> crate::Result<()> { if let Some(unsigned) = &self.unsigned { let mut unsigned: BTreeMap> = - serde_json::from_str(unsigned.get()) - .map_err(|_| Error::bad_database("Invalid unsigned in pdu event"))?; + serde_json::from_str(unsigned.get()).map_err(|_| { + Error::bad_database("Invalid unsigned in pdu event") + })?; unsigned.remove("transaction_id"); - self.unsigned = Some(to_raw_value(&unsigned).expect("unsigned is valid")); + self.unsigned = + Some(to_raw_value(&unsigned).expect("unsigned is valid")); } Ok(()) } - pub fn add_age(&mut self) -> crate::Result<()> { + pub(crate) fn add_age(&mut self) -> crate::Result<()> { let mut unsigned: BTreeMap> = self .unsigned .as_ref() - .map_or_else(|| Ok(BTreeMap::new()), |u| serde_json::from_str(u.get())) - .map_err(|_| Error::bad_database("Invalid unsigned in pdu event"))?; + .map_or_else( + || Ok(BTreeMap::new()), + |u| serde_json::from_str(u.get()), + ) + .map_err(|_| { + Error::bad_database("Invalid unsigned in pdu event") + })?; unsigned.insert("age".to_owned(), to_raw_value(&1).unwrap()); - self.unsigned = Some(to_raw_value(&unsigned).expect("unsigned is valid")); + self.unsigned = + Some(to_raw_value(&unsigned).expect("unsigned is valid")); Ok(()) } - /// Copies the `redacts` property of the event to the `content` dict and vice-versa. + /// Copies the `redacts` property of the event to the `content` dict and + /// vice-versa. 
/// /// This follows the specification's /// [recommendation](https://spec.matrix.org/v1.10/rooms/v11/#moving-the-redacts-property-of-mroomredaction-events-to-a-content-property): /// - /// > For backwards-compatibility with older clients, servers should add a redacts - /// > property to the top level of m.room.redaction events in when serving such events + /// > For backwards-compatibility with older clients, servers should add a + /// > redacts + /// > property to the top level of m.room.redaction events in when serving + /// > such events /// > over the Client-Server API. /// > - /// > For improved compatibility with newer clients, servers should add a redacts property - /// > to the content of m.room.redaction events in older room versions when serving + /// > For improved compatibility with newer clients, servers should add a + /// > redacts property + /// > to the content of m.room.redaction events in older room versions when + /// > serving /// > such events over the Client-Server API. 
- pub fn copy_redacts(&self) -> (Option>, Box) { - if self.kind == TimelineEventType::RoomRedaction { - if let Ok(mut content) = - serde_json::from_str::(self.content.get()) - { - if let Some(redacts) = content.redacts { - return (Some(redacts.into()), self.content.clone()); - } else if let Some(redacts) = self.redacts.clone() { - content.redacts = Some(redacts.into()); - return ( - self.redacts.clone(), - to_raw_value(&content).expect("Must be valid, we only added redacts field"), - ); + pub(crate) fn copy_redacts( + &self, + ) -> (Option>, Box) { + #[derive(Deserialize)] + struct ExtractRedacts<'a> { + #[serde(borrow)] + redacts: Option>, + } + + if self.kind != TimelineEventType::RoomRedaction { + return (self.redacts.clone(), self.content.clone()); + } + let Ok(extract) = + serde_json::from_str::>(self.content.get()) + else { + return (self.redacts.clone(), self.content.clone()); + }; + + if let Some(redacts) = extract.redacts { + (Some(redacts.into()), self.content.clone()) + } else if let Some(redacts) = self.redacts.clone() { + let content = serde_json::from_str::>( + self.content.get(), + ); + let mut content = match content { + Ok(content) => content, + Err(error) => { + warn!(%error, "PDU is not a valid json object"); + return (self.redacts.clone(), self.content.clone()); } - } - } + }; - (self.redacts.clone(), self.content.clone()) + let redacts_json = to_raw_value(&redacts) + .expect("all strings should be representable as json"); + content.insert("redacts", &redacts_json); + let content_json = to_raw_value(&content) + .expect("Must be valid, we only added redacts field"); + (self.redacts.clone(), content_json) + } else { + (self.redacts.clone(), self.content.clone()) + } } #[tracing::instrument(skip(self))] - pub fn to_sync_room_event(&self) -> Raw { + pub(crate) fn to_sync_room_event(&self) -> Raw { let (redacts, content) = self.copy_redacts(); let mut json = json!({ "content": content, @@ -153,33 +247,8 @@ impl PduEvent { 
serde_json::from_value(json).expect("Raw::from_value always works") } - /// This only works for events that are also AnyRoomEvents. #[tracing::instrument(skip(self))] - pub fn to_any_event(&self) -> Raw { - let mut json = json!({ - "content": self.content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "room_id": self.room_id, - }); - - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } - if let Some(state_key) = &self.state_key { - json["state_key"] = json!(state_key); - } - if let Some(redacts) = &self.redacts { - json["redacts"] = json!(redacts); - } - - serde_json::from_value(json).expect("Raw::from_value always works") - } - - #[tracing::instrument(skip(self))] - pub fn to_room_event(&self) -> Raw { + pub(crate) fn to_room_event(&self) -> Raw { let (redacts, content) = self.copy_redacts(); let mut json = json!({ "content": content, @@ -204,7 +273,7 @@ impl PduEvent { } #[tracing::instrument(skip(self))] - pub fn to_message_like_event(&self) -> Raw { + pub(crate) fn to_message_like_event(&self) -> Raw { let (redacts, content) = self.copy_redacts(); let mut json = json!({ "content": content, @@ -229,7 +298,7 @@ impl PduEvent { } #[tracing::instrument(skip(self))] - pub fn to_state_event(&self) -> Raw { + pub(crate) fn to_state_event(&self) -> Raw { let mut json = json!({ "content": self.content, "type": self.kind, @@ -248,7 +317,7 @@ impl PduEvent { } #[tracing::instrument(skip(self))] - pub fn to_sync_state_event(&self) -> Raw { + pub(crate) fn to_sync_state_event(&self) -> Raw { let mut json = json!({ "content": self.content, "type": self.kind, @@ -266,7 +335,7 @@ impl PduEvent { } #[tracing::instrument(skip(self))] - pub fn to_stripped_state_event(&self) -> Raw { + pub(crate) fn to_stripped_state_event(&self) -> Raw { let json = json!({ "content": self.content, "type": self.kind, @@ -278,7 +347,9 @@ impl PduEvent { } #[tracing::instrument(skip(self))] - 
pub fn to_stripped_spacechild_state_event(&self) -> Raw { + pub(crate) fn to_stripped_spacechild_state_event( + &self, + ) -> Raw { let json = json!({ "content": self.content, "type": self.kind, @@ -291,7 +362,9 @@ impl PduEvent { } #[tracing::instrument(skip(self))] - pub fn to_member_event(&self) -> Raw> { + pub(crate) fn to_member_event( + &self, + ) -> Raw> { let mut json = json!({ "content": self.content, "type": self.kind, @@ -311,30 +384,23 @@ impl PduEvent { } /// This does not return a full `Pdu` it is only to satisfy ruma's types. - #[tracing::instrument] - pub fn convert_to_outgoing_federation_event( + #[tracing::instrument(skip(pdu_json))] + pub(crate) fn convert_to_outgoing_federation_event( mut pdu_json: CanonicalJsonObject, ) -> Box { - if let Some(unsigned) = pdu_json - .get_mut("unsigned") - .and_then(|val| val.as_object_mut()) + if let Some(unsigned) = + pdu_json.get_mut("unsigned").and_then(|val| val.as_object_mut()) { unsigned.remove("transaction_id"); } pdu_json.remove("event_id"); - // TODO: another option would be to convert it to a canonical string to validate size - // and return a Result> - // serde_json::from_str::>( - // ruma::serde::to_canonical_json_string(pdu_json).expect("CanonicalJson is valid serde_json::Value"), - // ) - // .expect("Raw::from_value always works") - - to_raw_value(&pdu_json).expect("CanonicalJson is valid serde_json::Value") + to_raw_value(&pdu_json) + .expect("CanonicalJson is valid serde_json::Value") } - pub fn from_id_val( + pub(crate) fn from_id_val( event_id: &EventId, mut json: CanonicalJsonObject, ) -> Result { @@ -378,11 +444,15 @@ impl state_res::Event for PduEvent { self.state_key.as_deref() } - fn prev_events(&self) -> Box + '_> { + fn prev_events( + &self, + ) -> Box + '_> { Box::new(self.prev_events.iter()) } - fn auth_events(&self) -> Box + '_> { + fn auth_events( + &self, + ) -> Box + '_> { Box::new(self.auth_events.iter()) } @@ -412,20 +482,22 @@ impl Ord for PduEvent { /// Generates a correct 
eventId for the incoming pdu. /// -/// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. +/// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. pub(crate) fn gen_event_id_canonical_json( pdu: &RawJsonValue, - room_version_id: &RoomVersionId, + rules: &RoomVersionRules, ) -> crate::Result<(OwnedEventId, CanonicalJsonObject)> { - let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { - warn!("Error parsing incoming event {:?}: {:?}", pdu, e); - Error::BadServerResponse("Invalid PDU in server response") - })?; + let value: CanonicalJsonObject = + serde_json::from_str(pdu.get()).map_err(|error| { + warn!(%error, object = ?pdu, "Error parsing incoming event"); + Error::BadServerResponse("Invalid PDU in server response") + })?; let event_id = format!( "${}", // Anything higher than version3 behaves the same - ruma::signatures::reference_hash(&value, room_version_id) + ruma::signatures::reference_hash(&value, rules) .expect("ruma can calculate reference hashes") ) .try_into() @@ -436,11 +508,11 @@ pub(crate) fn gen_event_id_canonical_json( /// Build the start of a PDU in order to add it to the Database. 
#[derive(Debug, Deserialize)] -pub struct PduBuilder { +pub(crate) struct PduBuilder { #[serde(rename = "type")] - pub event_type: TimelineEventType, - pub content: Box, - pub unsigned: Option>, - pub state_key: Option, - pub redacts: Option>, + pub(crate) event_type: TimelineEventType, + pub(crate) content: Box, + pub(crate) unsigned: Option>, + pub(crate) state_key: Option, + pub(crate) redacts: Option>, } diff --git a/src/service/pusher/mod.rs b/src/service/pusher.rs similarity index 59% rename from src/service/pusher/mod.rs rename to src/service/pusher.rs index 6ca86be7..8f58cea8 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher.rs @@ -1,56 +1,76 @@ -mod data; -pub use data::Data; -use ruma::{events::AnySyncTimelineEvent, push::PushConditionPowerLevelsCtx}; +use std::{fmt::Debug, mem}; -use crate::{services, Error, PduEvent, Result}; use bytes::BytesMut; use ruma::{ api::{ client::push::{set_pusher, Pusher, PusherKind}, push_gateway::send_event_notification::{ self, - v1::{Device, Notification, NotificationCounts, NotificationPriority}, + v1::{ + Device, Notification, NotificationCounts, NotificationPriority, + }, }, IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, }, - events::{room::power_levels::RoomPowerLevelsEventContent, StateEventType, TimelineEventType}, - push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, + events::{ + room::power_levels::RoomPowerLevelsEventContent, AnySyncTimelineEvent, + StateEventType, TimelineEventType, + }, + push::{ + Action, PushConditionPowerLevelsCtx, PushConditionRoomCtx, PushFormat, + Ruleset, Tweak, + }, serde::Raw, uint, RoomId, UInt, UserId, }; +use tracing::warn; -use std::{fmt::Debug, mem}; -use tracing::{info, warn}; +use crate::{services, utils, Error, PduEvent, Result}; -pub struct Service { - pub db: &'static dyn Data, +mod data; + +pub(crate) use data::Data; + +pub(crate) struct Service { + pub(crate) db: &'static dyn Data, } impl Service { - pub fn set_pusher(&self, 
sender: &UserId, pusher: set_pusher::v3::PusherAction) -> Result<()> { + pub(crate) fn set_pusher( + &self, + sender: &UserId, + pusher: set_pusher::v3::PusherAction, + ) -> Result<()> { self.db.set_pusher(sender, pusher) } - pub fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result> { + pub(crate) fn get_pusher( + &self, + sender: &UserId, + pushkey: &str, + ) -> Result> { self.db.get_pusher(sender, pushkey) } - pub fn get_pushers(&self, sender: &UserId) -> Result> { + pub(crate) fn get_pushers(&self, sender: &UserId) -> Result> { self.db.get_pushers(sender) } - pub fn get_pushkeys(&self, sender: &UserId) -> Box>> { + pub(crate) fn get_pushkeys( + &self, + sender: &UserId, + ) -> Box>> { self.db.get_pushkeys(sender) } #[tracing::instrument(skip(self, destination, request))] - pub async fn send_request( + pub(crate) async fn send_request( &self, destination: &str, request: T, ) -> Result where - T: Debug, + T: OutgoingRequest + Debug, { let destination = destination.replace("/_matrix/push/v1/notify", ""); @@ -60,23 +80,17 @@ impl Service { SendAccessToken::IfRequired(""), &[MatrixVersion::V1_0], ) - .map_err(|e| { - warn!("Failed to find destination {}: {}", destination, e); + .map_err(|error| { + warn!(%error, %destination, "Failed to find destination"); Error::BadServerResponse("Invalid destination") })? - .map(|body| body.freeze()); + .map(BytesMut::freeze); let reqwest_request = reqwest::Request::try_from(http_request)?; - // TODO: we could keep this very short and let expo backoff do it's thing... 
- //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); - let url = reqwest_request.url().clone(); - let response = services() - .globals - .default_client() - .execute(reqwest_request) - .await; + let response = + services().globals.default_client().execute(reqwest_request).await; match response { Ok(mut response) => { @@ -92,18 +106,22 @@ impl Service { .expect("http::response::Builder is usable"), ); - let body = response.bytes().await.unwrap_or_else(|e| { - warn!("server error {}", e); + // TODO: handle timeout + let body = response.bytes().await.unwrap_or_else(|error| { + warn!(%error, "Server error"); Vec::new().into() - }); // TODO: handle timeout + }); if status != 200 { - info!( - "Push gateway returned bad response {} {}\n{}\n{:?}", - destination, - status, - url, - crate::utils::string_from_bytes(&body) + warn!( + push_gateway = %destination, + %status, + %url, + body = %utils::dbg_truncate_str( + String::from_utf8_lossy(&body).as_ref(), + 100, + ), + "Push gateway returned bad response", ); } @@ -112,23 +130,31 @@ impl Service { .body(body) .expect("reqwest body is valid http body"), ); - response.map_err(|_| { - info!( - "Push gateway returned invalid response bytes {}\n{}", - destination, url + response.map_err(|error| { + warn!( + %error, + appservice = %destination, + %url, + "Push gateway returned invalid response bytes", ); - Error::BadServerResponse("Push gateway returned bad response.") + Error::BadServerResponse( + "Push gateway returned bad response.", + ) }) } - Err(e) => { - warn!("Could not send request to pusher {}: {}", destination, e); - Err(e.into()) + Err(error) => { + warn!( + %error, + %destination, + "Could not send request to push gateway", + ); + Err(error.into()) } } } #[tracing::instrument(skip(self, user, unread, pusher, ruleset, pdu))] - pub async fn send_push_notice( + pub(crate) async fn send_push_notice( &self, user: &UserId, unread: UInt, @@ -144,8 +170,9 @@ impl Service { .state_accessor 
.room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) + serde_json::from_str(ev.content.get()).map_err(|_| { + Error::bad_database("invalid m.room.power_levels event") + }) }) .transpose()? .unwrap_or_default(); @@ -183,8 +210,10 @@ impl Service { Ok(()) } + // Allowed because this function uses `services()` + #[allow(clippy::unused_self)] #[tracing::instrument(skip(self, user, ruleset, pdu))] - pub fn get_actions<'a>( + pub(crate) fn get_actions<'a>( &self, user: &UserId, ruleset: &'a Ruleset, @@ -200,7 +229,8 @@ impl Service { let ctx = PushConditionRoomCtx { room_id: room_id.to_owned(), - member_count: 10_u32.into(), // TODO: get member count efficiently + // TODO: get member count efficiently + member_count: 10_u32.into(), user_id: user.to_owned(), user_display_name: services() .users @@ -225,17 +255,21 @@ impl Service { PusherKind::Http(http) => { // TODO: // Two problems with this - // 1. if "event_id_only" is the only format kind it seems we should never add more info + // 1. if "event_id_only" is the only format kind it seems we + // should never add more info // 2. 
can pusher/devices have conflicting formats - let event_id_only = http.format == Some(PushFormat::EventIdOnly); + let event_id_only = + http.format == Some(PushFormat::EventIdOnly); - let mut device = Device::new(pusher.ids.app_id.clone(), pusher.ids.pushkey.clone()); - device.data.default_payload = http.default_payload.clone(); - device.data.format = http.format.clone(); + let mut device = Device::new( + pusher.ids.app_id.clone(), + pusher.ids.pushkey.clone(), + ); + device.data = http.clone().into(); // Tweaks are only added if the format is NOT event_id_only if !event_id_only { - device.tweaks = tweaks.clone(); + device.tweaks.clone_from(&tweaks); } let d = vec![device]; @@ -248,38 +282,48 @@ impl Service { notifi.counts = NotificationCounts::new(unread, uint!(0)); if event.kind == TimelineEventType::RoomEncrypted - || tweaks - .iter() - .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) + || tweaks.iter().any(|t| { + matches!(t, Tweak::Highlight(true) | Tweak::Sound(_)) + }) { - notifi.prio = NotificationPriority::High + notifi.prio = NotificationPriority::High; } if event_id_only { - self.send_request(&http.url, send_event_notification::v1::Request::new(notifi)) - .await?; + self.send_request( + &http.url, + send_event_notification::v1::Request::new(notifi), + ) + .await?; } else { notifi.sender = Some(event.sender.clone()); notifi.event_type = Some(event.kind.clone()); - notifi.content = serde_json::value::to_raw_value(&event.content).ok(); + notifi.content = + serde_json::value::to_raw_value(&event.content).ok(); if event.kind == TimelineEventType::RoomMember { - notifi.user_is_target = - event.state_key.as_deref() == Some(event.sender.as_str()); + notifi.user_is_target = event.state_key.as_deref() + == Some(event.sender.as_str()); } - notifi.sender_display_name = services().users.displayname(&event.sender)?; + notifi.sender_display_name = + services().users.displayname(&event.sender)?; - notifi.room_name = 
services().rooms.state_accessor.get_name(&event.room_id)?; + notifi.room_name = services() + .rooms + .state_accessor + .get_name(&event.room_id)?; - self.send_request(&http.url, send_event_notification::v1::Request::new(notifi)) - .await?; + self.send_request( + &http.url, + send_event_notification::v1::Request::new(notifi), + ) + .await?; } Ok(()) } // TODO: Handle email - PusherKind::Email(_) => Ok(()), _ => Ok(()), } } diff --git a/src/service/pusher/data.rs b/src/service/pusher/data.rs index 2062f567..9fcba933 100644 --- a/src/service/pusher/data.rs +++ b/src/service/pusher/data.rs @@ -1,16 +1,27 @@ -use crate::Result; use ruma::{ api::client::push::{set_pusher, Pusher}, UserId, }; -pub trait Data: Send + Sync { - fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::PusherAction) -> Result<()>; +use crate::Result; - fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result>; +pub(crate) trait Data: Send + Sync { + fn set_pusher( + &self, + sender: &UserId, + pusher: set_pusher::v3::PusherAction, + ) -> Result<()>; + + fn get_pusher( + &self, + sender: &UserId, + pushkey: &str, + ) -> Result>; fn get_pushers(&self, sender: &UserId) -> Result>; - fn get_pushkeys<'a>(&'a self, sender: &UserId) - -> Box> + 'a>; + fn get_pushkeys<'a>( + &'a self, + sender: &UserId, + ) -> Box> + 'a>; } diff --git a/src/service/rooms.rs b/src/service/rooms.rs new file mode 100644 index 00000000..aab3cf00 --- /dev/null +++ b/src/service/rooms.rs @@ -0,0 +1,62 @@ +pub(crate) mod alias; +pub(crate) mod auth_chain; +pub(crate) mod directory; +pub(crate) mod edus; +pub(crate) mod event_handler; +pub(crate) mod lazy_loading; +pub(crate) mod metadata; +pub(crate) mod outlier; +pub(crate) mod pdu_metadata; +pub(crate) mod search; +pub(crate) mod short; +pub(crate) mod spaces; +pub(crate) mod state; +pub(crate) mod state_accessor; +pub(crate) mod state_cache; +pub(crate) mod state_compressor; +pub(crate) mod threads; +pub(crate) mod timeline; +pub(crate) mod user; + +pub(crate) 
trait Data: + alias::Data + + auth_chain::Data + + directory::Data + + edus::Data + + lazy_loading::Data + + metadata::Data + + outlier::Data + + pdu_metadata::Data + + search::Data + + short::Data + + state::Data + + state_accessor::Data + + state_cache::Data + + state_compressor::Data + + timeline::Data + + threads::Data + + user::Data +{ +} + +pub(crate) struct Service { + pub(crate) alias: alias::Service, + pub(crate) auth_chain: auth_chain::Service, + pub(crate) directory: directory::Service, + pub(crate) edus: edus::Service, + pub(crate) event_handler: event_handler::Service, + pub(crate) lazy_loading: lazy_loading::Service, + pub(crate) metadata: metadata::Service, + pub(crate) outlier: outlier::Service, + pub(crate) pdu_metadata: pdu_metadata::Service, + pub(crate) search: search::Service, + pub(crate) short: short::Service, + pub(crate) state: state::Service, + pub(crate) state_accessor: state_accessor::Service, + pub(crate) state_cache: state_cache::Service, + pub(crate) state_compressor: state_compressor::Service, + pub(crate) timeline: timeline::Service, + pub(crate) threads: threads::Service, + pub(crate) spaces: spaces::Service, + pub(crate) user: user::Service, +} diff --git a/src/service/rooms/alias.rs b/src/service/rooms/alias.rs new file mode 100644 index 00000000..638d4d92 --- /dev/null +++ b/src/service/rooms/alias.rs @@ -0,0 +1,121 @@ +use http::StatusCode; +use ruma::{ + api::{client::error::ErrorKind, federation}, + OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId, UserId, +}; + +use crate::{services, Error, Result}; + +mod data; + +pub(crate) use data::Data; + +pub(crate) struct Service { + db: &'static dyn Data, +} + +impl Service { + pub(crate) fn new(db: &'static D) -> Self + where + D: Data, + { + Self { + db, + } + } + + /// Creates or updates the alias to the given room id. 
+ pub(crate) fn set_alias( + &self, + alias: &RoomAliasId, + room_id: &RoomId, + user_id: &UserId, + ) -> Result<()> { + if alias == services().globals.admin_bot_room_alias_id + && user_id != services().globals.admin_bot_user_id + { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Only the admin bot can modify this alias", + )); + } + + self.db.set_alias(alias, room_id) + } + + /// Forgets about an alias. Returns an error if the alias did not exist. + pub(crate) fn remove_alias( + &self, + alias: &RoomAliasId, + user_id: &UserId, + ) -> Result<()> { + if alias == services().globals.admin_bot_room_alias_id + && user_id != services().globals.admin_bot_user_id + { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Only the admin bot can modify this alias", + )); + } + + self.db.remove_alias(alias) + } + + /// Looks up the roomid for the given local alias. + pub(crate) fn resolve_local_alias( + &self, + alias: &RoomAliasId, + ) -> Result> { + self.db.resolve_local_alias(alias) + } + + /// Returns all local aliases that point to the given room + pub(crate) fn local_aliases_for_room<'a>( + &'a self, + room_id: &RoomId, + ) -> Box> + 'a> { + self.db.local_aliases_for_room(room_id) + } + + /// Looks up the roomid for the given alias, fetching over federation if + /// remote. + pub(crate) async fn resolve_alias( + &self, + alias: &RoomAliasId, + ) -> Result> { + if alias.server_name() == services().globals.server_name() { + self.resolve_local_alias(alias) + } else { + self.resolve_remote_alias(alias).await + } + } + + /// Look up an alias on a remote server over federation. 
+ async fn resolve_remote_alias( + &self, + alias: &RoomAliasId, + ) -> Result> { + let result = services() + .sending + .send_federation_request( + alias.server_name(), + federation::query::get_room_information::v1::Request { + room_alias: alias.to_owned(), + }, + ) + .await; + + match result { + Ok(response) => Ok(Some(response.room_id)), + // The spec only names the 404 status code explicitly, but matching + // on M_NOT_FOUND as well seems reasonable. + Err(Error::Federation(_, error)) + if error.status_code == StatusCode::NOT_FOUND + || error.error_kind() == Some(&ErrorKind::NotFound) => + { + Ok(None) + } + Err(error) => Err(error), + } + } +} diff --git a/src/service/rooms/alias/data.rs b/src/service/rooms/alias/data.rs index 629b1ee1..0e0e6f7c 100644 --- a/src/service/rooms/alias/data.rs +++ b/src/service/rooms/alias/data.rs @@ -1,7 +1,8 @@ -use crate::Result; use ruma::{OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId}; -pub trait Data: Send + Sync { +use crate::Result; + +pub(crate) trait Data: Send + Sync { /// Creates or updates the alias to the given room id. fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()>; @@ -9,7 +10,10 @@ pub trait Data: Send + Sync { fn remove_alias(&self, alias: &RoomAliasId) -> Result<()>; /// Looks up the roomid for the given alias. 
- fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result>; + fn resolve_local_alias( + &self, + alias: &RoomAliasId, + ) -> Result>; /// Returns all local aliases that point to the given room fn local_aliases_for_room<'a>( diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs deleted file mode 100644 index d26030c0..00000000 --- a/src/service/rooms/alias/mod.rs +++ /dev/null @@ -1,35 +0,0 @@ -mod data; - -pub use data::Data; - -use crate::Result; -use ruma::{OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId}; - -pub struct Service { - pub db: &'static dyn Data, -} - -impl Service { - #[tracing::instrument(skip(self))] - pub fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()> { - self.db.set_alias(alias, room_id) - } - - #[tracing::instrument(skip(self))] - pub fn remove_alias(&self, alias: &RoomAliasId) -> Result<()> { - self.db.remove_alias(alias) - } - - #[tracing::instrument(skip(self))] - pub fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result> { - self.db.resolve_local_alias(alias) - } - - #[tracing::instrument(skip(self))] - pub fn local_aliases_for_room<'a>( - &'a self, - room_id: &RoomId, - ) -> Box> + 'a> { - self.db.local_aliases_for_room(room_id) - } -} diff --git a/src/service/rooms/auth_chain.rs b/src/service/rooms/auth_chain.rs new file mode 100644 index 00000000..2c7a4787 --- /dev/null +++ b/src/service/rooms/auth_chain.rs @@ -0,0 +1,236 @@ +use std::{ + collections::{BTreeSet, HashSet}, + sync::{Arc, Mutex}, +}; + +use lru_cache::LruCache; +use ruma::{api::client::error::ErrorKind, EventId, RoomId}; +use tracing::{debug, error, warn}; + +use super::short::ShortEventId; +use crate::{ + observability::{FoundIn, Lookup, METRICS}, + services, + utils::debug_slice_truncated, + Error, Result, +}; + +mod data; + +pub(crate) use data::Data; + +pub(crate) struct Service { + db: &'static dyn Data, + #[allow(clippy::type_complexity)] + auth_chain_cache: + Option, Arc>>>>, +} + +impl Service { + 
pub(crate) fn new( + db: &'static dyn Data, + auth_chain_cache_size: usize, + ) -> Self { + Self { + db, + auth_chain_cache: (auth_chain_cache_size > 0) + .then(|| Mutex::new(LruCache::new(auth_chain_cache_size))), + } + } + + pub(crate) fn get_cached_eventid_authchain( + &self, + key: &[ShortEventId], + ) -> Result>>> { + let lookup = Lookup::AuthChain; + + if let Some(cache) = &self.auth_chain_cache { + if let Some(result) = cache.lock().unwrap().get_mut(key) { + METRICS.record_lookup(lookup, FoundIn::Cache); + return Ok(Some(Arc::clone(result))); + } + } + + let Some(chain) = self.db.get_cached_eventid_authchain(key)? else { + METRICS.record_lookup(lookup, FoundIn::Nothing); + return Ok(None); + }; + + METRICS.record_lookup(lookup, FoundIn::Database); + let chain = Arc::new(chain); + + if let Some(cache) = &self.auth_chain_cache { + cache.lock().unwrap().insert(vec![key[0]], Arc::clone(&chain)); + } + + Ok(Some(chain)) + } + + #[tracing::instrument(skip(self))] + pub(crate) fn cache_auth_chain( + &self, + key: Vec, + auth_chain: Arc>, + ) -> Result<()> { + self.db.cache_auth_chain(&key, &auth_chain)?; + if let Some(cache) = &self.auth_chain_cache { + cache.lock().unwrap().insert(key, auth_chain); + } + Ok(()) + } + + #[tracing::instrument( + skip(self, starting_events), + fields(starting_events = debug_slice_truncated(&starting_events, 5)), + )] + pub(crate) async fn get_auth_chain<'a>( + &self, + room_id: &RoomId, + starting_events: Vec>, + ) -> Result> + 'a> { + const NUM_BUCKETS: usize = 50; + + let mut buckets = vec![BTreeSet::new(); NUM_BUCKETS]; + + let mut i = 0; + for id in starting_events { + let short = + services().rooms.short.get_or_create_shorteventid(&id)?; + // I'm afraid to change this in case there is accidental reliance on + // the truncation + #[allow(clippy::as_conversions, clippy::cast_possible_truncation)] + let bucket_id = (short.get() % NUM_BUCKETS as u64) as usize; + buckets[bucket_id].insert((short, id.clone())); + i += 1; + if i % 100 
== 0 { + tokio::task::yield_now().await; + } + } + + let mut full_auth_chain = HashSet::new(); + + let mut hits = 0; + let mut misses = 0; + for chunk in buckets { + if chunk.is_empty() { + continue; + } + + let chunk_key: Vec<_> = + chunk.iter().map(|(short, _)| short).copied().collect(); + if let Some(cached) = + self.get_cached_eventid_authchain(&chunk_key)? + { + hits += 1; + full_auth_chain.extend(cached.iter().copied()); + continue; + } + misses += 1; + + let mut chunk_cache = HashSet::new(); + let mut hits2 = 0; + let mut misses2 = 0; + let mut i = 0; + for (sevent_id, event_id) in chunk { + if let Some(cached) = + self.get_cached_eventid_authchain(&[sevent_id])? + { + hits2 += 1; + chunk_cache.extend(cached.iter().copied()); + } else { + misses2 += 1; + let auth_chain = Arc::new( + self.get_auth_chain_inner(room_id, &event_id)?, + ); + self.cache_auth_chain( + vec![sevent_id], + Arc::clone(&auth_chain), + )?; + debug!( + event_id = ?event_id, + chain_length = ?auth_chain.len(), + "Cache missed event" + ); + chunk_cache.extend(auth_chain.iter()); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } + } + debug!( + chunk_cache_length = ?chunk_cache.len(), + hits = ?hits2, + misses = ?misses2, + "Chunk missed", + ); + let chunk_cache = Arc::new(chunk_cache); + self.cache_auth_chain(chunk_key, Arc::clone(&chunk_cache))?; + full_auth_chain.extend(chunk_cache.iter()); + } + + debug!( + chain_length = ?full_auth_chain.len(), + hits = ?hits, + misses = ?misses, + "Auth chain stats", + ); + + Ok(full_auth_chain.into_iter().filter_map(move |sid| { + services().rooms.short.get_eventid_from_short(sid).ok() + })) + } + + // Allowed because this function uses `services()` + #[allow(clippy::unused_self)] + #[tracing::instrument(skip(self))] + fn get_auth_chain_inner( + &self, + room_id: &RoomId, + event_id: &EventId, + ) -> Result> { + let mut todo = vec![Arc::from(event_id)]; + let mut found = HashSet::new(); + + while let Some(event_id) = todo.pop() 
{ + match services().rooms.timeline.get_pdu(&event_id) { + Ok(Some(pdu)) => { + if pdu.room_id != room_id { + warn!(bad_room_id = %pdu.room_id, "Event referenced in auth chain has incorrect room id"); + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "Event has incorrect room id", + )); + } + for auth_event in &pdu.auth_events { + let sauthevent = services() + .rooms + .short + .get_or_create_shorteventid(auth_event)?; + + if !found.contains(&sauthevent) { + found.insert(sauthevent); + todo.push(auth_event.clone()); + } + } + } + Ok(None) => { + warn!( + ?event_id, + "Could not find pdu mentioned in auth events" + ); + } + Err(error) => { + error!( + ?event_id, + ?error, + "Could not load event in auth chain" + ); + } + } + } + + Ok(found) + } +} diff --git a/src/service/rooms/auth_chain/data.rs b/src/service/rooms/auth_chain/data.rs index e8c379fc..9dc855d2 100644 --- a/src/service/rooms/auth_chain/data.rs +++ b/src/service/rooms/auth_chain/data.rs @@ -1,11 +1,15 @@ -use crate::Result; -use std::{collections::HashSet, sync::Arc}; +use std::collections::HashSet; -pub trait Data: Send + Sync { +use crate::{service::rooms::short::ShortEventId, Result}; + +pub(crate) trait Data: Send + Sync { fn get_cached_eventid_authchain( &self, - shorteventid: &[u64], - ) -> Result>>>; - fn cache_auth_chain(&self, shorteventid: Vec, auth_chain: Arc>) - -> Result<()>; + shorteventid: &[ShortEventId], + ) -> Result>>; + fn cache_auth_chain( + &self, + shorteventid: &[ShortEventId], + auth_chain: &HashSet, + ) -> Result<()>; } diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs deleted file mode 100644 index da1944e2..00000000 --- a/src/service/rooms/auth_chain/mod.rs +++ /dev/null @@ -1,161 +0,0 @@ -mod data; -use std::{ - collections::{BTreeSet, HashSet}, - sync::Arc, -}; - -pub use data::Data; -use ruma::{api::client::error::ErrorKind, EventId, RoomId}; -use tracing::{debug, error, warn}; - -use crate::{services, Error, Result}; - -pub 
struct Service { - pub db: &'static dyn Data, -} - -impl Service { - pub fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result>>> { - self.db.get_cached_eventid_authchain(key) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, auth_chain: Arc>) -> Result<()> { - self.db.cache_auth_chain(key, auth_chain) - } - - #[tracing::instrument(skip(self, starting_events))] - pub async fn get_auth_chain<'a>( - &self, - room_id: &RoomId, - starting_events: Vec>, - ) -> Result> + 'a> { - const NUM_BUCKETS: usize = 50; - - let mut buckets = vec![BTreeSet::new(); NUM_BUCKETS]; - - let mut i = 0; - for id in starting_events { - let short = services().rooms.short.get_or_create_shorteventid(&id)?; - let bucket_id = (short % NUM_BUCKETS as u64) as usize; - buckets[bucket_id].insert((short, id.clone())); - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - let mut full_auth_chain = HashSet::new(); - - let mut hits = 0; - let mut misses = 0; - for chunk in buckets { - if chunk.is_empty() { - continue; - } - - let chunk_key: Vec = chunk.iter().map(|(short, _)| short).copied().collect(); - if let Some(cached) = services() - .rooms - .auth_chain - .get_cached_eventid_authchain(&chunk_key)? - { - hits += 1; - full_auth_chain.extend(cached.iter().copied()); - continue; - } - misses += 1; - - let mut chunk_cache = HashSet::new(); - let mut hits2 = 0; - let mut misses2 = 0; - let mut i = 0; - for (sevent_id, event_id) in chunk { - if let Some(cached) = services() - .rooms - .auth_chain - .get_cached_eventid_authchain(&[sevent_id])? 
- { - hits2 += 1; - chunk_cache.extend(cached.iter().copied()); - } else { - misses2 += 1; - let auth_chain = Arc::new(self.get_auth_chain_inner(room_id, &event_id)?); - services() - .rooms - .auth_chain - .cache_auth_chain(vec![sevent_id], Arc::clone(&auth_chain))?; - debug!( - event_id = ?event_id, - chain_length = ?auth_chain.len(), - "Cache missed event" - ); - chunk_cache.extend(auth_chain.iter()); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - }; - } - debug!( - chunk_cache_length = ?chunk_cache.len(), - hits = ?hits2, - misses = ?misses2, - "Chunk missed", - ); - let chunk_cache = Arc::new(chunk_cache); - services() - .rooms - .auth_chain - .cache_auth_chain(chunk_key, Arc::clone(&chunk_cache))?; - full_auth_chain.extend(chunk_cache.iter()); - } - - debug!( - chain_length = ?full_auth_chain.len(), - hits = ?hits, - misses = ?misses, - "Auth chain stats", - ); - - Ok(full_auth_chain - .into_iter() - .filter_map(move |sid| services().rooms.short.get_eventid_from_short(sid).ok())) - } - - #[tracing::instrument(skip(self, event_id))] - fn get_auth_chain_inner(&self, room_id: &RoomId, event_id: &EventId) -> Result> { - let mut todo = vec![Arc::from(event_id)]; - let mut found = HashSet::new(); - - while let Some(event_id) = todo.pop() { - match services().rooms.timeline.get_pdu(&event_id) { - Ok(Some(pdu)) => { - if pdu.room_id != room_id { - return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db")); - } - for auth_event in &pdu.auth_events { - let sauthevent = services() - .rooms - .short - .get_or_create_shorteventid(auth_event)?; - - if !found.contains(&sauthevent) { - found.insert(sauthevent); - todo.push(auth_event.clone()); - } - } - } - Ok(None) => { - warn!(?event_id, "Could not find pdu mentioned in auth events"); - } - Err(error) => { - error!(?event_id, ?error, "Could not load event in auth chain"); - } - } - } - - Ok(found) - } -} diff --git a/src/service/rooms/directory.rs b/src/service/rooms/directory.rs new 
file mode 100644 index 00000000..4f4b4344 --- /dev/null +++ b/src/service/rooms/directory.rs @@ -0,0 +1,5 @@ +mod data; + +pub(crate) use data::Data; + +pub(crate) type Service = &'static dyn Data; diff --git a/src/service/rooms/directory/data.rs b/src/service/rooms/directory/data.rs index aca731ce..f0bc3bf5 100644 --- a/src/service/rooms/directory/data.rs +++ b/src/service/rooms/directory/data.rs @@ -1,7 +1,8 @@ -use crate::Result; use ruma::{OwnedRoomId, RoomId}; -pub trait Data: Send + Sync { +use crate::Result; + +pub(crate) trait Data: Send + Sync { /// Adds the room to the public room directory fn set_public(&self, room_id: &RoomId) -> Result<()>; @@ -12,5 +13,7 @@ pub trait Data: Send + Sync { fn is_public_room(&self, room_id: &RoomId) -> Result; /// Returns the unsorted public room directory - fn public_rooms<'a>(&'a self) -> Box> + 'a>; + fn public_rooms<'a>( + &'a self, + ) -> Box> + 'a>; } diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs deleted file mode 100644 index 10f782bb..00000000 --- a/src/service/rooms/directory/mod.rs +++ /dev/null @@ -1,32 +0,0 @@ -mod data; - -pub use data::Data; -use ruma::{OwnedRoomId, RoomId}; - -use crate::Result; - -pub struct Service { - pub db: &'static dyn Data, -} - -impl Service { - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId) -> Result<()> { - self.db.set_public(room_id) - } - - #[tracing::instrument(skip(self))] - pub fn set_not_public(&self, room_id: &RoomId) -> Result<()> { - self.db.set_not_public(room_id) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - self.db.is_public_room(room_id) - } - - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator> + '_ { - self.db.public_rooms() - } -} diff --git a/src/service/rooms/edus.rs b/src/service/rooms/edus.rs new file mode 100644 index 00000000..d9483465 --- /dev/null +++ b/src/service/rooms/edus.rs @@ -0,0 +1,9 @@ 
+pub(crate) mod read_receipt; +pub(crate) mod typing; + +pub(crate) trait Data: read_receipt::Data + 'static {} + +pub(crate) struct Service { + pub(crate) read_receipt: read_receipt::Service, + pub(crate) typing: typing::Service, +} diff --git a/src/service/rooms/edus/mod.rs b/src/service/rooms/edus/mod.rs deleted file mode 100644 index a6bc3d5b..00000000 --- a/src/service/rooms/edus/mod.rs +++ /dev/null @@ -1,11 +0,0 @@ -pub mod presence; -pub mod read_receipt; -pub mod typing; - -pub trait Data: presence::Data + read_receipt::Data + 'static {} - -pub struct Service { - pub presence: presence::Service, - pub read_receipt: read_receipt::Service, - pub typing: typing::Service, -} diff --git a/src/service/rooms/edus/presence/data.rs b/src/service/rooms/edus/presence/data.rs deleted file mode 100644 index 53329e08..00000000 --- a/src/service/rooms/edus/presence/data.rs +++ /dev/null @@ -1,38 +0,0 @@ -use std::collections::HashMap; - -use crate::Result; -use ruma::{events::presence::PresenceEvent, OwnedUserId, RoomId, UserId}; - -pub trait Data: Send + Sync { - /// Adds a presence event which will be saved until a new event replaces it. - /// - /// Note: This method takes a RoomId because presence updates are always bound to rooms to - /// make sure users outside these rooms can't see them. - fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()>; - - /// Resets the presence timeout, so the user will stay in their current presence state. - fn ping_presence(&self, user_id: &UserId) -> Result<()>; - - /// Returns the timestamp of the last presence update of this user in millis since the unix epoch. - fn last_presence_update(&self, user_id: &UserId) -> Result>; - - /// Returns the presence event with correct last_active_ago. 
- fn get_presence_event( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result>; - - /// Returns the most recent presence updates that happened after the event with id `since`. - fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result>; -} diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs deleted file mode 100644 index 4b929d28..00000000 --- a/src/service/rooms/edus/presence/mod.rs +++ /dev/null @@ -1,125 +0,0 @@ -mod data; -use std::collections::HashMap; - -pub use data::Data; -use ruma::{events::presence::PresenceEvent, OwnedUserId, RoomId, UserId}; - -use crate::Result; - -pub struct Service { - pub db: &'static dyn Data, -} - -impl Service { - /// Adds a presence event which will be saved until a new event replaces it. - /// - /// Note: This method takes a RoomId because presence updates are always bound to rooms to - /// make sure users outside these rooms can't see them. - pub fn update_presence( - &self, - _user_id: &UserId, - _room_id: &RoomId, - _presence: PresenceEvent, - ) -> Result<()> { - // self.db.update_presence(user_id, room_id, presence) - Ok(()) - } - - /// Resets the presence timeout, so the user will stay in their current presence state. - pub fn ping_presence(&self, _user_id: &UserId) -> Result<()> { - // self.db.ping_presence(user_id) - Ok(()) - } - - pub fn get_last_presence_event( - &self, - _user_id: &UserId, - _room_id: &RoomId, - ) -> Result> { - // let last_update = match self.db.last_presence_update(user_id)? { - // Some(last) => last, - // None => return Ok(None), - // }; - - // self.db.get_presence_event(room_id, user_id, last_update) - Ok(None) - } - - /* TODO - /// Sets all users to offline who have been quiet for too long. 
- fn _presence_maintain( - &self, - rooms: &super::Rooms, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let current_timestamp = utils::millis_since_unix_epoch(); - - for (user_id_bytes, last_timestamp) in self - .userid_lastpresenceupdate - .iter() - .filter_map(|(k, bytes)| { - Some(( - k, - utils::u64_from_bytes(&bytes) - .map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - .ok()?, - )) - }) - .take_while(|(_, timestamp)| current_timestamp.saturating_sub(*timestamp) > 5 * 60_000) - // 5 Minutes - { - // Send new presence events to set the user offline - let count = globals.next_count()?.to_be_bytes(); - let user_id: Box<_> = utils::string_from_bytes(&user_id_bytes) - .map_err(|_| { - Error::bad_database("Invalid UserId bytes in userid_lastpresenceupdate.") - })? - .try_into() - .map_err(|_| Error::bad_database("Invalid UserId in userid_lastpresenceupdate."))?; - for room_id in rooms.rooms_joined(&user_id).filter_map(|r| r.ok()) { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(&user_id_bytes); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&PresenceEvent { - content: PresenceEventContent { - avatar_url: None, - currently_active: None, - displayname: None, - last_active_ago: Some( - last_timestamp.try_into().expect("time is valid"), - ), - presence: PresenceState::Offline, - status_msg: None, - }, - sender: user_id.to_owned(), - }) - .expect("PresenceEvent can be serialized"), - )?; - } - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - } - - Ok(()) - }*/ - - /// Returns the most recent presence updates that happened after the event with id `since`. 
- pub fn presence_since( - &self, - _room_id: &RoomId, - _since: u64, - ) -> Result> { - // self.db.presence_since(room_id, since) - Ok(HashMap::new()) - } -} diff --git a/src/service/rooms/edus/read_receipt.rs b/src/service/rooms/edus/read_receipt.rs new file mode 100644 index 00000000..4f4b4344 --- /dev/null +++ b/src/service/rooms/edus/read_receipt.rs @@ -0,0 +1,5 @@ +mod data; + +pub(crate) use data::Data; + +pub(crate) type Service = &'static dyn Data; diff --git a/src/service/rooms/edus/read_receipt/data.rs b/src/service/rooms/edus/read_receipt/data.rs index 044dad82..b48ec9e2 100644 --- a/src/service/rooms/edus/read_receipt/data.rs +++ b/src/service/rooms/edus/read_receipt/data.rs @@ -1,7 +1,10 @@ -use crate::Result; -use ruma::{events::receipt::ReceiptEvent, serde::Raw, OwnedUserId, RoomId, UserId}; +use ruma::{ + events::receipt::ReceiptEvent, serde::Raw, OwnedUserId, RoomId, UserId, +}; -pub trait Data: Send + Sync { +use crate::Result; + +pub(crate) trait Data: Send + Sync { /// Replaces the previous read receipt. fn readreceipt_update( &self, @@ -10,7 +13,8 @@ pub trait Data: Send + Sync { event: ReceiptEvent, ) -> Result<()>; - /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. + /// Returns an iterator over the most recent read receipts in a room that + /// happened after the event with id `since`. #[allow(clippy::type_complexity)] fn readreceipts_since<'a>( &'a self, @@ -27,11 +31,28 @@ pub trait Data: Send + Sync { >; /// Sets a private read marker at `count`. - fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()>; + fn private_read_set( + &self, + room_id: &RoomId, + user_id: &UserId, + count: u64, + ) -> Result<()>; /// Returns the private read marker. 
- fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result>; + // TODO: Implement MSC2285 + #[allow(dead_code)] + fn private_read_get( + &self, + room_id: &RoomId, + user_id: &UserId, + ) -> Result>; /// Returns the count of the last typing update in this room. - fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result; + // TODO: Implement MSC2285 + #[allow(dead_code)] + fn last_privateread_update( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result; } diff --git a/src/service/rooms/edus/read_receipt/mod.rs b/src/service/rooms/edus/read_receipt/mod.rs deleted file mode 100644 index c6035280..00000000 --- a/src/service/rooms/edus/read_receipt/mod.rs +++ /dev/null @@ -1,55 +0,0 @@ -mod data; - -pub use data::Data; - -use crate::Result; -use ruma::{events::receipt::ReceiptEvent, serde::Raw, OwnedUserId, RoomId, UserId}; - -pub struct Service { - pub db: &'static dyn Data, -} - -impl Service { - /// Replaces the previous read receipt. - pub fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()> { - self.db.readreceipt_update(user_id, room_id, event) - } - - /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. - #[tracing::instrument(skip(self))] - pub fn readreceipts_since<'a>( - &'a self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item = Result<( - OwnedUserId, - u64, - Raw, - )>, - > + 'a { - self.db.readreceipts_since(room_id, since) - } - - /// Sets a private read marker at `count`. - #[tracing::instrument(skip(self))] - pub fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()> { - self.db.private_read_set(room_id, user_id, count) - } - - /// Returns the private read marker. 
- #[tracing::instrument(skip(self))] - pub fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - self.db.private_read_get(room_id, user_id) - } - - /// Returns the count of the last typing update in this room. - pub fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - self.db.last_privateread_update(user_id, room_id) - } -} diff --git a/src/service/rooms/edus/typing/mod.rs b/src/service/rooms/edus/typing.rs similarity index 55% rename from src/service/rooms/edus/typing/mod.rs rename to src/service/rooms/edus/typing.rs index 7546aa84..097fed27 100644 --- a/src/service/rooms/edus/typing/mod.rs +++ b/src/service/rooms/edus/typing.rs @@ -1,19 +1,40 @@ -use ruma::{events::SyncEphemeralRoomEvent, OwnedRoomId, OwnedUserId, RoomId, UserId}; use std::collections::BTreeMap; + +use ruma::{ + events::{typing::TypingEventContent, SyncEphemeralRoomEvent}, + OwnedRoomId, OwnedUserId, RoomId, UserId, +}; use tokio::sync::{broadcast, RwLock}; +use tracing::trace; use crate::{services, utils, Result}; -pub struct Service { - pub typing: RwLock>>, // u64 is unix timestamp of timeout - pub last_typing_update: RwLock>, // timestamp of the last change to typing users - pub typing_update_sender: broadcast::Sender, +pub(crate) struct Service { + // u64 is unix timestamp of timeout + typing: RwLock>>, + // timestamp of the last change to typing users + last_typing_update: RwLock>, + typing_update_sender: broadcast::Sender, } impl Service { - /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is - /// called. - pub async fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> { + pub(crate) fn new() -> Self { + Self { + typing: RwLock::new(BTreeMap::new()), + last_typing_update: RwLock::new(BTreeMap::new()), + typing_update_sender: broadcast::channel(100).0, + } + } + + /// Sets a user as typing until the timeout timestamp is reached or + /// `roomtyping_remove` is called. 
+ #[tracing::instrument(skip(self))] + pub(crate) async fn typing_add( + &self, + user_id: &UserId, + room_id: &RoomId, + timeout: u64, + ) -> Result<()> { self.typing .write() .await @@ -24,12 +45,22 @@ impl Service { .write() .await .insert(room_id.to_owned(), services().globals.next_count()?); - let _ = self.typing_update_sender.send(room_id.to_owned()); + if self.typing_update_sender.send(room_id.to_owned()).is_err() { + trace!( + "receiver found what it was looking for and is no longer \ + interested" + ); + } Ok(()) } /// Removes a user from typing before the timeout is reached. - pub async fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + #[tracing::instrument(skip(self))] + pub(crate) async fn typing_remove( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result<()> { self.typing .write() .await @@ -40,11 +71,17 @@ impl Service { .write() .await .insert(room_id.to_owned(), services().globals.next_count()?); - let _ = self.typing_update_sender.send(room_id.to_owned()); + if self.typing_update_sender.send(room_id.to_owned()).is_err() { + trace!( + "receiver found what it was looking for and is no longer \ + interested" + ); + } Ok(()) } - pub async fn wait_for_update(&self, room_id: &RoomId) -> Result<()> { + #[tracing::instrument(skip(self))] + pub(crate) async fn wait_for_update(&self, room_id: &RoomId) -> Result<()> { let mut receiver = self.typing_update_sender.subscribe(); while let Ok(next) = receiver.recv().await { if next == room_id { @@ -56,6 +93,7 @@ impl Service { } /// Makes sure that typing events with old timestamps get removed. 
+ #[tracing::instrument(skip(self, room_id))] async fn typings_maintain(&self, room_id: &RoomId) -> Result<()> { let current_timestamp = utils::millis_since_unix_epoch(); let mut removable = Vec::new(); @@ -81,13 +119,22 @@ impl Service { .write() .await .insert(room_id.to_owned(), services().globals.next_count()?); - let _ = self.typing_update_sender.send(room_id.to_owned()); + if self.typing_update_sender.send(room_id.to_owned()).is_err() { + trace!( + "receiver found what it was looking for and is no longer \ + interested" + ); + } } Ok(()) } /// Returns the count of the last typing update in this room. - pub async fn last_typing_update(&self, room_id: &RoomId) -> Result { + #[tracing::instrument(skip(self))] + pub(crate) async fn last_typing_update( + &self, + room_id: &RoomId, + ) -> Result { self.typings_maintain(room_id).await?; Ok(self .last_typing_update @@ -99,12 +146,13 @@ impl Service { } /// Returns a new typing EDU. - pub async fn typings_all( + #[tracing::instrument(skip(self))] + pub(crate) async fn typings_all( &self, room_id: &RoomId, - ) -> Result> { + ) -> Result> { Ok(SyncEphemeralRoomEvent { - content: ruma::events::typing::TypingEventContent { + content: TypingEventContent { user_ids: self .typing .read() diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler.rs similarity index 50% rename from src/service/rooms/event_handler/mod.rs rename to src/service/rooms/event_handler.rs index b7817e50..2c28c025 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler.rs @@ -1,14 +1,10 @@ -/// An async function that can recursively call itself. 
-type AsyncRecursiveType<'a, T> = Pin + 'a + Send>>; - use std::{ collections::{hash_map, BTreeMap, HashMap, HashSet}, - pin::Pin, sync::Arc, time::{Duration, Instant, SystemTime}, }; -use futures_util::{stream::FuturesUnordered, Future, StreamExt}; +use futures_util::{future::BoxFuture, stream::FuturesUnordered, StreamExt}; use ruma::{ api::{ client::error::ErrorKind, @@ -23,27 +19,38 @@ use ruma::{ }, }, events::{ - room::{ - create::RoomCreateEventContent, redaction::RoomRedactionEventContent, - server_acl::RoomServerAclEventContent, - }, - StateEventType, TimelineEventType, + room::server_acl::RoomServerAclEventContent, StateEventType, + TimelineEventType, }, int, - serde::Base64, - state_res::{self, RoomVersion, StateMap}, - uint, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, - OwnedServerName, OwnedServerSigningKeyId, RoomId, RoomVersionId, ServerName, + room_version_rules::RoomVersionRules, + state_res::{self, StateMap}, + uint, CanonicalJsonObject, CanonicalJsonValue, EventId, + MilliSecondsSinceUnixEpoch, OwnedServerName, OwnedServerSigningKeyId, + RoomId, RoomVersionId, ServerName, }; +use serde::Deserialize; use serde_json::value::RawValue as RawJsonValue; use tokio::sync::{RwLock, RwLockWriteGuard, Semaphore}; use tracing::{debug, error, info, trace, warn}; -use crate::{service::*, services, Error, PduEvent, Result}; +use super::{ + short::ShortStateKey, state_compressor::CompressedStateEvent, + timeline::PduId, +}; +use crate::{ + service::{globals::SigningKeys, pdu, rooms::state::ExtractVersion}, + services, + utils::debug_slice_truncated, + Error, PduEvent, Result, +}; -use super::state_compressor::CompressedStateEvent; +#[derive(Deserialize)] +struct ExtractOriginServerTs { + origin_server_ts: MilliSecondsSinceUnixEpoch, +} -pub struct Service; +pub(crate) struct Service; impl Service { /// When receiving an event one needs to: @@ -52,34 +59,38 @@ impl Service { /// 1.1. Remove unsigned field /// 2. 
Check signatures, otherwise drop /// 3. Check content hash, redact if doesn't match - /// 4. Fetch any missing auth events doing all checks listed here starting at 1. These are not - /// timeline events - /// 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are - /// also rejected "due to auth events" - /// 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events + /// 4. Fetch any missing auth events doing all checks listed here starting + /// at 1. These are not timeline events + /// 5. Reject "due to auth events" if can't get all the auth events or some + /// of the auth events are also rejected "due to auth events" + /// 6. Reject "due to auth events" if the event doesn't pass auth based on + /// the auth events /// 7. Persist this event as an outlier /// 8. If not timeline event: stop - /// 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline - /// events - /// 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities - /// doing all the checks in this list starting at 1. These are not timeline events + /// 9. Fetch any missing prev events doing all checks listed here starting + /// at 1. These are timeline events + /// 10. Fetch missing state and auth chain events by calling /state_ids at + /// backwards extremities doing all the checks in this list starting at + /// 1. These are not timeline events /// 11. Check the auth of the event passes based on the state of the event - /// 12. Ensure that the state is derived from the previous current state (i.e. we calculated by - /// doing state res where one of the inputs was a previously trusted set of state, don't just - /// trust a set of state we got from a remote) + /// 12. Ensure that the state is derived from the previous current state + /// (i.e. 
we calculated by doing state res where one of the inputs was a + /// previously trusted set of state, don't just trust a set of state we + /// got from a remote) /// 13. Use state resolution to find new room state - /// 14. Check if the event passes auth based on the "current state" of the room, if not soft fail it - // We use some AsyncRecursiveType hacks here so we can call this async funtion recursively + /// 14. Check if the event passes auth based on the "current state" of the + /// room, if not soft fail it + #[allow(clippy::too_many_lines)] #[tracing::instrument(skip(self, value, is_timeline_event, pub_key_map))] pub(crate) async fn handle_incoming_pdu<'a>( &self, origin: &'a ServerName, event_id: &'a EventId, room_id: &'a RoomId, - value: BTreeMap, + value: CanonicalJsonObject, is_timeline_event: bool, - pub_key_map: &'a RwLock>>, - ) -> Result>> { + pub_key_map: &'a RwLock>, + ) -> Result> { // 0. Check the server is in the room if !services().rooms.metadata.exists(room_id)? { return Err(Error::BadRequest( @@ -90,49 +101,43 @@ impl Service { if services().rooms.metadata.is_disabled(room_id)? { return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Federation of this room is currently disabled on this server.", )); } - services().rooms.event_handler.acl_check(origin, room_id)?; + self.acl_check(origin, room_id)?; // 1. Skip the PDU if we already have it as a timeline event if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(event_id)? { - return Ok(Some(pdu_id.to_vec())); + return Ok(Some(pdu_id.clone())); } - let create_event = services() - .rooms - .state_accessor - .room_state_get(room_id, &StateEventType::RoomCreate, "")? 
- .ok_or_else(|| Error::bad_database("Failed to find create event in db."))?; + let first_pdu_in_room = + services().rooms.timeline.first_pdu_in_room(room_id)?.ok_or_else( + || Error::bad_database("Failed to find first pdu in db."), + )?; - let create_event_content: RoomCreateEventContent = - serde_json::from_str(create_event.content.get()).map_err(|e| { - error!("Invalid create event: {}", e); - Error::BadDatabase("Invalid create event in db") - })?; - let room_version_id = &create_event_content.room_version; - - let first_pdu_in_room = services() + let room_version_id = services() .rooms - .timeline - .first_pdu_in_room(room_id)? - .ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?; + .state + .get_create_content::(room_id)?; + let Some(room_version_rules) = room_version_id.rules() else { + return Err(Error::UnsupportedRoomVersion(room_version_id.clone())); + }; let (incoming_pdu, val) = self .handle_outlier_pdu( origin, - &create_event, event_id, room_id, + &room_version_rules, value, false, pub_key_map, ) .await?; - self.check_room_id(room_id, &incoming_pdu)?; + Self::check_room_id(room_id, &incoming_pdu)?; // 8. if not timeline event: stop if !is_timeline_event { @@ -144,13 +149,13 @@ impl Service { return Ok(None); } - // 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events + // 9. Fetch any missing prev events doing all checks listed here + // starting at 1. These are timeline events let (sorted_prev_events, mut eventid_info) = self .fetch_unknown_prev_events( origin, - &create_event, room_id, - room_version_id, + &room_version_rules, pub_key_map, incoming_pdu.prev_events.clone(), ) @@ -162,8 +167,9 @@ impl Service { // Check for disabled again because it might have changed if services().rooms.metadata.is_disabled(room_id)? 
{ return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Federation of this room is currently disabled on this server.", + ErrorKind::forbidden(), + "Federation of this room is currently disabled on this \ + server.", )); } @@ -175,13 +181,14 @@ impl Service { .get(&*prev_id) { // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); + let mut min_elapsed_duration = + Duration::from_secs(5 * 60) * (*tries) * (*tries); if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { min_elapsed_duration = Duration::from_secs(60 * 60 * 24); } if time.elapsed() < min_elapsed_duration { - info!("Backing off from {}", prev_id); + info!(event_id = %prev_id, "Backing off from prev event"); continue; } } @@ -199,7 +206,7 @@ impl Service { e.insert((Instant::now(), 1)); } hash_map::Entry::Occupied(mut e) => { - *e.get_mut() = (Instant::now(), e.get().1 + 1) + *e.get_mut() = (Instant::now(), e.get().1 + 1); } } continue; @@ -217,21 +224,24 @@ impl Service { .roomid_federationhandletime .write() .await - .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time)); + .insert( + room_id.to_owned(), + ((*prev_id).to_owned(), start_time), + ); - if let Err(e) = self + if let Err(error) = self .upgrade_outlier_to_timeline_pdu( pdu, json, - &create_event, origin, room_id, + &room_version_id, pub_key_map, ) .await { errors += 1; - warn!("Prev event {} failed: {}", prev_id, e); + warn!(%error, event_id = %prev_id, "Prev event failed"); match services() .globals .bad_event_ratelimiter @@ -243,11 +253,10 @@ impl Service { e.insert((Instant::now(), 1)); } hash_map::Entry::Occupied(mut e) => { - *e.get_mut() = (Instant::now(), e.get().1 + 1) + *e.get_mut() = (Instant::now(), e.get().1 + 1); } } } - let elapsed = start_time.elapsed(); services() .globals .roomid_federationhandletime @@ -255,10 +264,9 @@ impl Service { .await .remove(&room_id.to_owned()); debug!( - "Handling prev event {} took {}m{}s", - prev_id, - elapsed.as_secs() / 
60, - elapsed.as_secs() % 60 + elapsed = ?start_time.elapsed(), + event_id = %prev_id, + "Finished handling prev event", ); } } @@ -272,15 +280,13 @@ impl Service { .write() .await .insert(room_id.to_owned(), (event_id.to_owned(), start_time)); - let r = services() - .rooms - .event_handler + let r = self .upgrade_outlier_to_timeline_pdu( incoming_pdu, val, - &create_event, origin, room_id, + &room_version_id, pub_key_map, ) .await; @@ -294,46 +300,79 @@ impl Service { r } - #[allow(clippy::type_complexity, clippy::too_many_arguments)] - #[tracing::instrument(skip(self, create_event, value, pub_key_map))] + #[allow(clippy::too_many_arguments)] + #[tracing::instrument(skip(self, origin, room_id, value, pub_key_map))] fn handle_outlier_pdu<'a>( &'a self, origin: &'a ServerName, - create_event: &'a PduEvent, event_id: &'a EventId, room_id: &'a RoomId, - mut value: BTreeMap, + room_version_rules: &'a RoomVersionRules, + mut value: CanonicalJsonObject, auth_events_known: bool, - pub_key_map: &'a RwLock>>, - ) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap)>> { + pub_key_map: &'a RwLock>, + ) -> BoxFuture<'a, Result<(Arc, CanonicalJsonObject)>> { Box::pin(async move { // 1.1. Remove unsigned field value.remove("unsigned"); - // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json - - // We go through all the signatures we see on the value and fetch the corresponding signing - // keys - self.fetch_required_signing_keys(&value, pub_key_map) - .await?; - // 2. Check signatures, otherwise drop // 3. check content hash, redact if doesn't match - let create_event_content: RoomCreateEventContent = - serde_json::from_str(create_event.content.get()).map_err(|e| { - error!("Invalid create event: {}", e); - Error::BadDatabase("Invalid create event in db") + + // TODO: For RoomVersion6 we must check that Raw<..> is canonical, + // do we anywhere? 
+ // + // https://matrix.org/docs/spec/rooms/v6#canonical-json + + // We go through all the signatures we see on the value and fetch + // the corresponding signing keys + self.fetch_required_signing_keys(&value, pub_key_map).await?; + + let origin_server_ts = + value.get("origin_server_ts").ok_or_else(|| { + error!("Invalid PDU, no origin_server_ts field"); + Error::BadRequest( + ErrorKind::MissingParam, + "Invalid PDU, no origin_server_ts field", + ) })?; - let room_version_id = &create_event_content.room_version; - let room_version = - RoomVersion::new(room_version_id).expect("room version is supported"); + let origin_server_ts: MilliSecondsSinceUnixEpoch = { + let ts = origin_server_ts.as_integer().ok_or_else(|| { + Error::BadRequest( + ErrorKind::InvalidParam, + "origin_server_ts must be an integer", + ) + })?; + + MilliSecondsSinceUnixEpoch(i64::from(ts).try_into().map_err( + |_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Time must be after the unix epoch", + ) + }, + )?) + }; let guard = pub_key_map.read().await; - let mut val = match ruma::signatures::verify_event(&guard, &value, room_version_id) { - Err(e) => { + + let pkey_map = (*guard).clone(); + + // Removing all the expired keys, unless the room version allows + // stale keys + let filtered_keys = services() + .globals + .filter_keys_server_map(pkey_map, origin_server_ts); + + let mut val = match ruma::signatures::verify_event( + &filtered_keys, + &value, + room_version_rules, + ) { + Err(error) => { // Drop - warn!("Dropping bad event {}: {}", event_id, e,); + warn!(%event_id, %error, "Dropping bad event"); return Err(Error::BadRequest( ErrorKind::InvalidParam, "Signature verification failed", @@ -341,19 +380,26 @@ impl Service { } Ok(ruma::signatures::Verified::Signatures) => { // Redact - warn!("Calculated hash does not match: {}", event_id); - let obj = match ruma::canonical_json::redact(value, room_version_id, None) { - Ok(obj) => obj, - Err(_) => { - return Err(Error::BadRequest( - 
ErrorKind::InvalidParam, - "Redaction failed", - )) - } + warn!(%event_id, "Calculated hash does not match"); + let Ok(obj) = ruma::canonical_json::redact( + value, + &room_version_rules.redaction, + None, + ) else { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Redaction failed", + )); }; - // Skip the PDU if it is redacted and we already have it as an outlier event - if services().rooms.timeline.get_pdu_json(event_id)?.is_some() { + // Skip the PDU if it is redacted and we already have it as + // an outlier event + if services() + .rooms + .timeline + .get_pdu_json(event_id)? + .is_some() + { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Event was redacted and we already knew about it", @@ -367,23 +413,28 @@ impl Service { drop(guard); - // Now that we have checked the signature and hashes we can add the eventID and convert - // to our PduEvent type + // Now that we have checked the signature and hashes we can add the + // eventID and convert to our PduEvent type val.insert( "event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned()), ); let incoming_pdu = serde_json::from_value::( - serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"), + serde_json::to_value(&val) + .expect("CanonicalJsonObj is a valid JsonValue"), ) .map_err(|_| Error::bad_database("Event is not a valid PDU."))?; - self.check_room_id(room_id, &incoming_pdu)?; + Self::check_room_id(room_id, &incoming_pdu)?; if !auth_events_known { - // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events - // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" - // NOTE: Step 5 is not applied anymore because it failed too often + // 4. fetch any missing auth events doing all checks listed here + // starting at 1. These are not timeline events + // 5. 
Reject "due to auth events" if can't get all the auth + // events or some of the auth events are also rejected "due + // to auth events" + // NOTE: Step 5 is not applied anymore because it failed too + // often debug!(event_id = ?incoming_pdu.event_id, "Fetching auth events"); self.fetch_and_handle_outliers( origin, @@ -392,32 +443,31 @@ impl Service { .iter() .map(|x| Arc::from(&**x)) .collect::>(), - create_event, room_id, - room_version_id, + room_version_rules, pub_key_map, ) .await; } - // 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events + // 6. Reject "due to auth events" if the event doesn't pass auth + // based on the auth events debug!( - "Auth check for {} based on auth events", - incoming_pdu.event_id + event_id = %incoming_pdu.event_id, + "Starting auth check for event based on auth events", ); // Build map of auth events let mut auth_events = HashMap::new(); - for id in &incoming_pdu.auth_events { - let auth_event = match services().rooms.timeline.get_pdu(id)? { - Some(e) => e, - None => { - warn!("Could not find auth event {}", id); - continue; - } + for event_id in &incoming_pdu.auth_events { + let Some(auth_event) = + services().rooms.timeline.get_pdu(event_id)? 
+ else { + warn!(%event_id, "Could not find auth event"); + continue; }; - self.check_room_id(room_id, &auth_event)?; + Self::check_room_id(room_id, &auth_event)?; match auth_events.entry(( auth_event.kind.to_string().into(), @@ -432,7 +482,8 @@ impl Service { hash_map::Entry::Occupied(_) => { return Err(Error::BadRequest( ErrorKind::InvalidParam, - "Auth event's type and state_key combination exists multiple times.", + "Auth event's type and state_key combination \ + exists multiple times.", )); } } @@ -440,9 +491,7 @@ impl Service { // The original create event must be in the auth events if !matches!( - auth_events - .get(&(StateEventType::RoomCreate, "".to_owned())) - .map(|a| a.as_ref()), + auth_events.get(&(StateEventType::RoomCreate, String::new())), Some(_) | None ) { return Err(Error::BadRequest( @@ -451,21 +500,16 @@ impl Service { )); } - if !state_res::event_auth::auth_check( - &room_version, + state_res::event_auth::auth_check( + &room_version_rules.authorization, &incoming_pdu, - None::, // TODO: third party invite |k, s| auth_events.get(&(k.to_string().into(), s.to_owned())), ) - .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed"))? - { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Auth check failed", - )); - } + .map_err(|_e| { + Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed") + })?; - debug!("Validation successful."); + debug!("Validation successful"); // 7. Persist the event as an outlier. 
services() @@ -473,24 +517,29 @@ impl Service { .outlier .add_pdu_outlier(&incoming_pdu.event_id, &val)?; - debug!("Added pdu as outlier."); + debug!("Added pdu as outlier"); Ok((Arc::new(incoming_pdu), val)) }) } - #[tracing::instrument(skip(self, incoming_pdu, val, create_event, pub_key_map))] - pub async fn upgrade_outlier_to_timeline_pdu( + #[allow(clippy::too_many_lines)] + #[tracing::instrument(skip_all, fields( + incoming_pdu = %incoming_pdu.event_id, + ))] + pub(crate) async fn upgrade_outlier_to_timeline_pdu( &self, incoming_pdu: Arc, - val: BTreeMap, - create_event: &PduEvent, + val: CanonicalJsonObject, origin: &ServerName, room_id: &RoomId, - pub_key_map: &RwLock>>, - ) -> Result>> { + room_version_id: &RoomVersionId, + pub_key_map: &RwLock>, + ) -> Result> { // Skip the PDU if we already have it as a timeline event - if let Ok(Some(pduid)) = services().rooms.timeline.get_pdu_id(&incoming_pdu.event_id) { + if let Ok(Some(pduid)) = + services().rooms.timeline.get_pdu_id(&incoming_pdu.event_id) + { return Ok(Some(pduid)); } @@ -505,22 +554,22 @@ impl Service { )); } - info!("Upgrading {} to timeline pdu", incoming_pdu.event_id); + debug!( + event_id = %incoming_pdu.event_id, + "Upgrading event to timeline pdu", + ); - let create_event_content: RoomCreateEventContent = - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::BadDatabase("Invalid create event in db") - })?; + let Some(room_version_rules) = room_version_id.rules() else { + return Err(Error::UnsupportedRoomVersion(room_version_id.clone())); + }; - let room_version_id = &create_event_content.room_version; - let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); + // 10. Fetch missing state and auth chain events by calling /state_ids + // at backwards extremities doing all the checks in this list + // starting at 1. These are not timeline events. - // 10. 
Fetch missing state and auth chain events by calling /state_ids at backwards extremities - // doing all the checks in this list starting at 1. These are not timeline events. - - // TODO: if we know the prev_events of the incoming event we can avoid the request and build - // the state from a known point and resolve if > 1 prev_event + // TODO: if we know the prev_events of the incoming event we can avoid + // the request and build the state from a known point and + // resolve if > 1 prev_event debug!("Requesting state at event"); let mut state_at_incoming_event = None; @@ -553,14 +602,17 @@ impl Service { .ok() .flatten() .ok_or_else(|| { - Error::bad_database("Could not find prev event, but we know the state.") + Error::bad_database( + "Could not find prev event, but we know the state.", + ) })?; if let Some(state_key) = &prev_pdu.state_key { - let shortstatekey = services().rooms.short.get_or_create_shortstatekey( - &prev_pdu.kind.to_string().into(), - state_key, - )?; + let shortstatekey = + services().rooms.short.get_or_create_shortstatekey( + &prev_pdu.kind.to_string().into(), + state_key, + )?; state.insert(shortstatekey, Arc::from(prev_event)); // Now it's the state after the pdu @@ -574,21 +626,18 @@ impl Service { let mut okay = true; for prev_eventid in &incoming_pdu.prev_events { - let prev_event = - if let Ok(Some(pdu)) = services().rooms.timeline.get_pdu(prev_eventid) { - pdu - } else { - okay = false; - break; - }; + let Ok(Some(prev_event)) = + services().rooms.timeline.get_pdu(prev_eventid) + else { + okay = false; + break; + }; - let sstatehash = if let Ok(Some(s)) = services() + let Ok(Some(sstatehash)) = services() .rooms .state_accessor .pdu_shortstatehash(prev_eventid) - { - s - } else { + else { okay = false; break; }; @@ -597,8 +646,10 @@ impl Service { } if okay { - let mut fork_states = Vec::with_capacity(extremity_sstatehashes.len()); - let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len()); + let mut fork_states = + 
Vec::with_capacity(extremity_sstatehashes.len()); + let mut auth_chain_sets = + Vec::with_capacity(extremity_sstatehashes.len()); for (sstatehash, prev_event) in extremity_sstatehashes { let mut leaf_state: HashMap<_, _> = services() @@ -608,25 +659,36 @@ impl Service { .await?; if let Some(state_key) = &prev_event.state_key { - let shortstatekey = services().rooms.short.get_or_create_shortstatekey( - &prev_event.kind.to_string().into(), - state_key, - )?; - leaf_state.insert(shortstatekey, Arc::from(&*prev_event.event_id)); + let shortstatekey = services() + .rooms + .short + .get_or_create_shortstatekey( + &prev_event.kind.to_string().into(), + state_key, + )?; + leaf_state.insert( + shortstatekey, + Arc::from(&*prev_event.event_id), + ); // Now it's the state after the pdu } let mut state = StateMap::with_capacity(leaf_state.len()); - let mut starting_events = Vec::with_capacity(leaf_state.len()); + let mut starting_events = + Vec::with_capacity(leaf_state.len()); for (k, id) in leaf_state { - if let Ok((ty, st_key)) = services().rooms.short.get_statekey_from_short(k) + if let Ok((ty, st_key)) = + services().rooms.short.get_statekey_from_short(k) { // FIXME: Undo .to_string().into() when StateMap // is updated to use StateEventType - state.insert((ty.to_string().into(), st_key), id.clone()); + state.insert( + (ty.to_string().into(), st_key), + id.clone(), + ); } else { - warn!("Failed to get_statekey_from_short."); + warn!("Failed to get_statekey_from_short"); } starting_events.push(id); } @@ -645,14 +707,18 @@ impl Service { let lock = services().globals.stateres_mutex.lock(); - let result = - state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| { - let res = services().rooms.timeline.get_pdu(id); - if let Err(e) = &res { - error!("LOOK AT ME Failed to fetch event: {}", e); + let result = state_res::resolve( + &room_version_rules.authorization, + &fork_states, + auth_chain_sets, + |event_id| { + let res = 
services().rooms.timeline.get_pdu(event_id); + if let Err(error) = &res { + error!(%error, %event_id, "Failed to fetch event"); } res.ok().flatten() - }); + }, + ); drop(lock); state_at_incoming_event = match result { @@ -660,8 +726,10 @@ impl Service { new_state .into_iter() .map(|((event_type, state_key), event_id)| { - let shortstatekey = - services().rooms.short.get_or_create_shortstatekey( + let shortstatekey = services() + .rooms + .short + .get_or_create_shortstatekey( &event_type.to_string().into(), &state_key, )?; @@ -669,8 +737,12 @@ impl Service { }) .collect::>()?, ), - Err(e) => { - warn!("State resolution on prev events failed, either an event could not be found or deserialization: {}", e); + Err(error) => { + warn!( + %error, + "State resolution on prev events failed, either \ + an event could not be found or deserialization" + ); None } } @@ -679,8 +751,9 @@ impl Service { if state_at_incoming_event.is_none() { debug!("Calling /state_ids"); - // Call /state_ids to find out what the state at this pdu is. We trust the server's - // response to some extend, but we still do a lot of checks on the events + // Call /state_ids to find out what the state at this pdu is. 
We + // trust the server's response to some extend, but we + // still do a lot of checks on the events match services() .sending .send_federation_request( @@ -693,7 +766,7 @@ impl Service { .await { Ok(res) => { - debug!("Fetching state events at event."); + debug!("Fetching state events at event"); let collect = res .pdu_ids .iter() @@ -703,44 +776,72 @@ impl Service { .fetch_and_handle_outliers( origin, &collect, - create_event, room_id, - room_version_id, + &room_version_rules, pub_key_map, ) .await; let mut state: HashMap<_, Arc> = HashMap::new(); for (pdu, _) in state_vec { - let state_key = pdu.state_key.clone().ok_or_else(|| { - Error::bad_database("Found non-state pdu in state events.") - })?; + let state_key = + pdu.state_key.clone().ok_or_else(|| { + Error::bad_database( + "Found non-state pdu in state events.", + ) + })?; - let shortstatekey = services().rooms.short.get_or_create_shortstatekey( - &pdu.kind.to_string().into(), - &state_key, - )?; + let shortstatekey = services() + .rooms + .short + .get_or_create_shortstatekey( + &pdu.kind.to_string().into(), + &state_key, + )?; match state.entry(shortstatekey) { hash_map::Entry::Vacant(v) => { v.insert(Arc::from(&*pdu.event_id)); } - hash_map::Entry::Occupied(_) => return Err( - Error::bad_database("State event's type and state_key combination exists multiple times."), - ), + hash_map::Entry::Occupied(_) => { + return Err(Error::bad_database( + "State event's type and state_key \ + combination exists multiple times.", + )) + } } } - // The original create event must still be in the state - let create_shortstatekey = services() - .rooms - .short - .get_shortstatekey(&StateEventType::RoomCreate, "")? - .expect("Room exists"); + let new_create_event = state + .get( + &services() + .rooms + .short + .get_shortstatekey( + &StateEventType::RoomCreate, + "", + )? 
+ .expect("Room exists"), + ) + .map(|x| &**x) + .ok_or(Error::BadServerResponse( + "state_ids response did not contain an \ + m.room.create event", + ))?; - if state.get(&create_shortstatekey).map(|id| id.as_ref()) - != Some(&create_event.event_id) - { + let original_create_event = &*services() + .rooms + .state_accessor + .room_state_get( + room_id, + &StateEventType::RoomCreate, + "", + )? + .expect("Room exists") + .event_id; + + // The original create event must still be in the state + if new_create_event != original_create_event { return Err(Error::bad_database( "Incoming event refers to wrong create event.", )); @@ -748,9 +849,9 @@ impl Service { state_at_incoming_event = Some(state); } - Err(e) => { - warn!("Fetching state for event failed: {}", e); - return Err(e); + Err(error) => { + warn!(%error, "Fetching state for event failed"); + return Err(error); } }; } @@ -759,11 +860,11 @@ impl Service { state_at_incoming_event.expect("we always set this to some above"); debug!("Starting auth check"); - // 11. Check the auth of the event passes based on the state of the event - let check_result = state_res::event_auth::auth_check( - &room_version, + // 11. 
Check the auth of the event passes based on the state of the + // event + state_res::event_auth::auth_check( + &room_version_rules.authorization, &incoming_pdu, - None::, // TODO: third party invite |k, s| { services() .rooms @@ -771,100 +872,67 @@ impl Service { .get_shortstatekey(&k.to_string().into(), s) .ok() .flatten() - .and_then(|shortstatekey| state_at_incoming_event.get(&shortstatekey)) - .and_then(|event_id| services().rooms.timeline.get_pdu(event_id).ok().flatten()) + .and_then(|shortstatekey| { + state_at_incoming_event.get(&shortstatekey) + }) + .and_then(|event_id| { + services() + .rooms + .timeline + .get_pdu(event_id) + .ok() + .flatten() + }) }, ) - .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))?; - - if !check_result { - return Err(Error::bad_database( - "Event has failed auth check with state at the event.", - )); - } + .map_err(|_e| { + Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed.") + })?; debug!("Auth check succeeded"); // Soft fail check before doing state res let auth_events = services().rooms.state.get_auth_events( room_id, + &room_version_rules.authorization, &incoming_pdu.kind, &incoming_pdu.sender, incoming_pdu.state_key.as_deref(), &incoming_pdu.content, )?; - let soft_fail = !state_res::event_auth::auth_check( - &room_version, + let auth_fail_against_current = state_res::event_auth::auth_check( + &room_version_rules.authorization, &incoming_pdu, - None::, |k, s| auth_events.get(&(k.clone(), s.to_owned())), ) - .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))? 
- || incoming_pdu.kind == TimelineEventType::RoomRedaction - && match room_version_id { - RoomVersionId::V1 - | RoomVersionId::V2 - | RoomVersionId::V3 - | RoomVersionId::V4 - | RoomVersionId::V5 - | RoomVersionId::V6 - | RoomVersionId::V7 - | RoomVersionId::V8 - | RoomVersionId::V9 - | RoomVersionId::V10 => { - if let Some(redact_id) = &incoming_pdu.redacts { - !services().rooms.state_accessor.user_can_redact( - redact_id, - &incoming_pdu.sender, - &incoming_pdu.room_id, - true, - )? - } else { - false - } - } - RoomVersionId::V11 => { - let content = serde_json::from_str::( - incoming_pdu.content.get(), - ) - .map_err(|_| Error::bad_database("Invalid content in redaction pdu."))?; + .is_err(); - if let Some(redact_id) = &content.redacts { - !services().rooms.state_accessor.user_can_redact( - redact_id, - &incoming_pdu.sender, - &incoming_pdu.room_id, - true, - )? - } else { - false - } - } - _ => { - unreachable!("Validity of room version already checked") - } - }; + let cannot_redact = incoming_pdu.kind + == TimelineEventType::RoomRedaction + && !services() + .rooms + .state_accessor + .redaction_event_allowed(&incoming_pdu)?; + let soft_fail = auth_fail_against_current || cannot_redact; // 13. Use state resolution to find new room state // We start looking at current room state now, so lets lock the room - let mutex_state = Arc::clone( - services() - .globals - .roomid_mutex_state - .write() - .await - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; + let room_token = services() + .globals + .roomid_mutex_state + .lock_key(room_id.to_owned()) + .await; - // Now we calculate the set of extremities this room has after the incoming event has been - // applied. We start with the previous extremities (aka leaves) + // Now we calculate the set of extremities this room has after the + // incoming event has been applied. 
We start with the previous + // extremities (aka leaves) debug!("Calculating extremities"); - let mut extremities = services().rooms.state.get_forward_extremities(room_id)?; + let mut extremities = + services().rooms.state.get_forward_extremities(room_id)?; - // Remove any forward extremities that are referenced by this incoming event's prev_events + // Remove any forward extremities that are referenced by this incoming + // event's prev_events for prev_event in &incoming_pdu.prev_events { if extremities.contains(prev_event) { extremities.remove(prev_event); @@ -874,10 +942,7 @@ impl Service { // Only keep those extremities were not referenced yet extremities.retain(|id| { !matches!( - services() - .rooms - .pdu_metadata - .is_event_referenced(room_id, id), + services().rooms.pdu_metadata.is_event_referenced(room_id, id), Ok(true) ) }); @@ -901,16 +966,18 @@ impl Service { // We also add state after incoming event to the fork states let mut state_after = state_at_incoming_event.clone(); if let Some(state_key) = &incoming_pdu.state_key { - let shortstatekey = services().rooms.short.get_or_create_shortstatekey( - &incoming_pdu.kind.to_string().into(), - state_key, - )?; + let shortstatekey = + services().rooms.short.get_or_create_shortstatekey( + &incoming_pdu.kind.to_string().into(), + state_key, + )?; - state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); + state_after + .insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); } let new_room_state = self - .resolve_state(room_id, room_version_id, state_after) + .resolve_state(room_id, &room_version_rules, state_after) .await?; // Set the new room state to the resolved state @@ -924,11 +991,12 @@ impl Service { services() .rooms .state - .force_state(room_id, sstatehash, new, removed, &state_lock) + .force_state(&room_token, sstatehash, new, removed) .await?; } - // 14. Check if the event passes auth based on the "current state" of the room, if not soft fail it + // 14. 
Check if the event passes auth based on the "current state" of + // the room, if not soft fail it debug!("Starting soft fail auth check"); if soft_fail { @@ -941,12 +1009,13 @@ impl Service { extremities.iter().map(|e| (**e).to_owned()).collect(), state_ids_compressed, soft_fail, - &state_lock, + &room_token, ) .await?; - // Soft fail, we keep the event as an outlier but don't add it to the timeline - warn!("Event was soft failed: {:?}", incoming_pdu); + // Soft fail, we keep the event as an outlier but don't add it to + // the timeline + warn!("Event was soft failed"); services() .rooms .pdu_metadata @@ -973,22 +1042,23 @@ impl Service { extremities.iter().map(|e| (**e).to_owned()).collect(), state_ids_compressed, soft_fail, - &state_lock, + &room_token, ) .await?; debug!("Appended incoming pdu"); // Event has passed all auth/stateres checks - drop(state_lock); + drop(room_token); Ok(pdu_id) } + #[tracing::instrument(skip(self, room_version_rules, incoming_state))] async fn resolve_state( &self, room_id: &RoomId, - room_version_id: &RoomVersionId, - incoming_state: HashMap>, + room_version_rules: &RoomVersionRules, + incoming_state: HashMap>, ) -> Result>> { debug!("Loading current room state ids"); let current_sstatehash = services() @@ -1011,7 +1081,7 @@ impl Service { services() .rooms .auth_chain - .get_auth_chain(room_id, state.iter().map(|(_, id)| id.clone()).collect()) + .get_auth_chain(room_id, state.values().cloned().collect()) .await? 
.collect(), ); @@ -1028,7 +1098,9 @@ impl Service { .rooms .short .get_statekey_from_short(k) - .map(|(ty, st_key)| ((ty.to_string().into(), st_key), id)) + .map(|(ty, st_key)| { + ((ty.to_string().into(), st_key), id) + }) .ok() }) .collect::>() @@ -1039,36 +1111,37 @@ impl Service { let fetch_event = |id: &_| { let res = services().rooms.timeline.get_pdu(id); - if let Err(e) = &res { - error!("LOOK AT ME Failed to fetch event: {}", e); + if let Err(error) = &res { + error!(%error, "Failed to fetch event"); } res.ok().flatten() }; let lock = services().globals.stateres_mutex.lock(); - let state = match state_res::resolve( - room_version_id, + let Ok(state) = state_res::resolve( + &room_version_rules.authorization, &fork_states, auth_chain_sets, fetch_event, - ) { - Ok(new_state) => new_state, - Err(_) => { - return Err(Error::bad_database("State resolution failed, either an event could not be found or deserialization")); - } + ) else { + return Err(Error::bad_database( + "State resolution failed, either an event could not be found \ + or deserialization", + )); }; drop(lock); - debug!("State resolution done. Compressing state"); + debug!("State resolution done; compressing state"); let new_room_state = state .into_iter() .map(|((event_type, state_key), event_id)| { - let shortstatekey = services() - .rooms - .short - .get_or_create_shortstatekey(&event_type.to_string().into(), &state_key)?; + let shortstatekey = + services().rooms.short.get_or_create_shortstatekey( + &event_type.to_string().into(), + &state_key, + )?; services() .rooms .state_compressor @@ -1088,18 +1161,15 @@ impl Service { /// b. Look at outlier pdu tree /// c. Ask origin server over federation /// d. TODO: Ask other servers over federation? 
- #[allow(clippy::type_complexity)] #[tracing::instrument(skip_all)] pub(crate) fn fetch_and_handle_outliers<'a>( &'a self, origin: &'a ServerName, events: &'a [Arc], - create_event: &'a PduEvent, room_id: &'a RoomId, - room_version_id: &'a RoomVersionId, - pub_key_map: &'a RwLock>>, - ) -> AsyncRecursiveType<'a, Vec<(Arc, Option>)>> - { + room_version_rules: &'a RoomVersionRules, + pub_key_map: &'a RwLock>, + ) -> BoxFuture<'a, Vec<(Arc, Option)>> { Box::pin(async move { let back_off = |id| async move { match services() @@ -1113,26 +1183,28 @@ impl Service { e.insert((Instant::now(), 1)); } hash_map::Entry::Occupied(mut e) => { - *e.get_mut() = (Instant::now(), e.get().1 + 1) + *e.get_mut() = (Instant::now(), e.get().1 + 1); } } }; let mut pdus = vec![]; - for id in events { + for event_id in events { // a. Look in the main timeline (pduid_pdu tree) // b. Look at outlier pdu tree // (get_pdu_json checks both) - if let Ok(Some(local_pdu)) = services().rooms.timeline.get_pdu(id) { - trace!("Found {} in db", id); + if let Ok(Some(local_pdu)) = + services().rooms.timeline.get_pdu(event_id) + { + trace!(%event_id, "Found event locally"); pdus.push((local_pdu, None)); continue; } // c. Ask origin server over federation - // We also handle its auth chain here so we don't get a stack overflow in - // handle_outlier_pdu. - let mut todo_auth_events = vec![Arc::clone(id)]; + // We also handle its auth chain here so we don't get a stack + // overflow in handle_outlier_pdu. 
+ let mut todo_auth_events = vec![Arc::clone(event_id)]; let mut events_in_reverse_order = Vec::new(); let mut events_all = HashSet::new(); let mut i = 0; @@ -1147,12 +1219,18 @@ impl Service { // Exponential backoff let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + if min_elapsed_duration + > Duration::from_secs(60 * 60 * 24) + { + min_elapsed_duration = + Duration::from_secs(60 * 60 * 24); } if time.elapsed() < min_elapsed_duration { - info!("Backing off from {}", next_id); + info!( + event_id = %next_id, + "Backing off from event", + ); continue; } } @@ -1166,13 +1244,18 @@ impl Service { tokio::task::yield_now().await; } - if let Ok(Some(_)) = services().rooms.timeline.get_pdu(&next_id) { - trace!("Found {} in db", next_id); + if let Ok(Some(_)) = + services().rooms.timeline.get_pdu(&next_id) + { + trace!(event_id = %next_id, "Found event locally"); continue; } - info!("Fetching {} over federation.", next_id); - match services() + info!( + event_id = %next_id, + "Fetching event over federation", + ); + if let Ok(res) = services() .sending .send_federation_request( origin, @@ -1182,46 +1265,48 @@ impl Service { ) .await { - Ok(res) => { - info!("Got {} over federation", next_id); - let (calculated_event_id, value) = - match pdu::gen_event_id_canonical_json(&res.pdu, room_version_id) { - Ok(t) => t, - Err(_) => { - back_off((*next_id).to_owned()).await; - continue; - } - }; - - if calculated_event_id != *next_id { - warn!("Server didn't return event id we requested: requested: {}, we got {}. 
Event: {:?}", - next_id, calculated_event_id, &res.pdu); - } - - if let Some(auth_events) = - value.get("auth_events").and_then(|c| c.as_array()) - { - for auth_event in auth_events { - if let Ok(auth_event) = - serde_json::from_value(auth_event.clone().into()) - { - let a: Arc = auth_event; - todo_auth_events.push(a); - } else { - warn!("Auth event id is not valid"); - } - } - } else { - warn!("Auth event list invalid"); - } - - events_in_reverse_order.push((next_id.clone(), value)); - events_all.insert(next_id); - } - Err(_) => { - warn!("Failed to fetch event: {}", next_id); + info!(event_id = %next_id, "Got event over federation"); + let Ok((calculated_event_id, value)) = + pdu::gen_event_id_canonical_json( + &res.pdu, + room_version_rules, + ) + else { back_off((*next_id).to_owned()).await; + continue; + }; + + if calculated_event_id != *next_id { + warn!( + expected_event_id = %next_id, + actual_event_id = %calculated_event_id, + "Server returned an event with a different ID \ + than requested", + ); } + + if let Some(auth_events) = + value.get("auth_events").and_then(|c| c.as_array()) + { + for auth_event in auth_events { + if let Ok(auth_event) = serde_json::from_value( + auth_event.clone().into(), + ) { + let a: Arc = auth_event; + todo_auth_events.push(a); + } else { + warn!("Auth event id is not valid"); + } + } + } else { + warn!("Auth event list invalid"); + } + + events_in_reverse_order.push((next_id.clone(), value)); + events_all.insert(next_id); + } else { + warn!(event_id = %next_id, "Failed to fetch event"); + back_off((*next_id).to_owned()).await; } } @@ -1236,12 +1321,18 @@ impl Service { // Exponential backoff let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + if min_elapsed_duration + > Duration::from_secs(60 * 60 * 24) + { + min_elapsed_duration = + Duration::from_secs(60 * 60 * 24); } if 
time.elapsed() < min_elapsed_duration { - info!("Backing off from {}", next_id); + info!( + event_id = %next_id, + "Backing off from event", + ); continue; } } @@ -1249,9 +1340,9 @@ impl Service { match self .handle_outlier_pdu( origin, - create_event, next_id, room_id, + room_version_rules, value.clone(), true, pub_key_map, @@ -1259,12 +1350,16 @@ impl Service { .await { Ok((pdu, json)) => { - if next_id == id { + if next_id == event_id { pdus.push((pdu, Some(json))); } } - Err(e) => { - warn!("Authentication of event {} failed: {:?}", next_id, e); + Err(error) => { + warn!( + event_id = %next_id, + %error, + "Event failed auth checks", + ); back_off((**next_id).to_owned()).await; } } @@ -1274,102 +1369,103 @@ impl Service { }) } + #[tracing::instrument(skip_all)] + #[allow(clippy::type_complexity)] async fn fetch_unknown_prev_events( &self, origin: &ServerName, - create_event: &PduEvent, room_id: &RoomId, - room_version_id: &RoomVersionId, - pub_key_map: &RwLock>>, + room_version_rules: &RoomVersionRules, + pub_key_map: &RwLock>, initial_set: Vec>, ) -> Result<( Vec>, - HashMap, (Arc, BTreeMap)>, + HashMap, (Arc, CanonicalJsonObject)>, )> { let mut graph: HashMap, _> = HashMap::new(); let mut eventid_info = HashMap::new(); let mut todo_outlier_stack: Vec> = initial_set; - let first_pdu_in_room = services() - .rooms - .timeline - .first_pdu_in_room(room_id)? 
- .ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?; + let first_pdu_in_room = + services().rooms.timeline.first_pdu_in_room(room_id)?.ok_or_else( + || Error::bad_database("Failed to find first pdu in db."), + )?; let mut amount = 0; while let Some(prev_event_id) = todo_outlier_stack.pop() { - if let Some((pdu, json_opt)) = self + let Some((pdu, json_opt)) = self .fetch_and_handle_outliers( origin, &[prev_event_id.clone()], - create_event, room_id, - room_version_id, + room_version_rules, pub_key_map, ) .await .pop() - { - self.check_room_id(room_id, &pdu)?; - - if amount > services().globals.max_fetch_prev_events() { - // Max limit reached - warn!("Max prev event limit reached!"); - graph.insert(prev_event_id.clone(), HashSet::new()); - continue; - } - - if let Some(json) = json_opt.or_else(|| { - services() - .rooms - .outlier - .get_outlier_pdu_json(&prev_event_id) - .ok() - .flatten() - }) { - if pdu.origin_server_ts > first_pdu_in_room.origin_server_ts { - amount += 1; - for prev_prev in &pdu.prev_events { - if !graph.contains_key(prev_prev) { - todo_outlier_stack.push(prev_prev.clone()); - } - } - - graph.insert( - prev_event_id.clone(), - pdu.prev_events.iter().cloned().collect(), - ); - } else { - // Time based check failed - graph.insert(prev_event_id.clone(), HashSet::new()); - } - - eventid_info.insert(prev_event_id.clone(), (pdu, json)); - } else { - // Get json failed, so this was not fetched over federation - graph.insert(prev_event_id.clone(), HashSet::new()); - } - } else { - // Fetch and handle failed + else { graph.insert(prev_event_id.clone(), HashSet::new()); + continue; + }; + + Self::check_room_id(room_id, &pdu)?; + + if amount > services().globals.max_fetch_prev_events() { + warn!("Max prev event limit reached"); + graph.insert(prev_event_id.clone(), HashSet::new()); + continue; } + + let Some(json) = json_opt.or_else(|| { + services() + .rooms + .outlier + .get_outlier_pdu_json(&prev_event_id) + .ok() + .flatten() + }) 
else { + graph.insert(prev_event_id.clone(), HashSet::new()); + continue; + }; + + if pdu.origin_server_ts <= first_pdu_in_room.origin_server_ts { + graph.insert(prev_event_id.clone(), HashSet::new()); + continue; + } + + amount += 1; + + for prev_prev in &pdu.prev_events { + if !graph.contains_key(prev_prev) { + todo_outlier_stack.push(prev_prev.clone()); + } + } + + graph.insert( + prev_event_id.clone(), + pdu.prev_events.iter().cloned().collect(), + ); + + eventid_info.insert(prev_event_id.clone(), (pdu, json)); } - let sorted = state_res::lexicographical_topological_sort(&graph, |event_id| { - // This return value is the key used for sorting events, - // events are then sorted by power level, time, - // and lexically by event_id. - Ok(( - int!(0), - MilliSecondsSinceUnixEpoch( - eventid_info - .get(event_id) - .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), - ), - )) - }) - .map_err(|_| Error::bad_database("Error sorting prev events"))?; + let sorted = + state_res::lexicographical_topological_sort(&graph, |event_id| { + // This return value is the key used for sorting events, + // events are then sorted by power level, time, + // and lexically by event_id. 
+ Ok(( + int!(0), + MilliSecondsSinceUnixEpoch( + eventid_info.get(event_id).map_or_else( + || uint!(0), + |info| info.0.origin_server_ts, + ), + ), + )) + }) + .map_err(|_| Error::bad_database("Error sorting prev events"))?; Ok((sorted, eventid_info)) } @@ -1377,8 +1473,8 @@ impl Service { #[tracing::instrument(skip_all)] pub(crate) async fn fetch_required_signing_keys( &self, - event: &BTreeMap, - pub_key_map: &RwLock>>, + event: &CanonicalJsonObject, + pub_key_map: &RwLock>, ) -> Result<()> { let signatures = event .get("signatures") @@ -1390,81 +1486,93 @@ impl Service { "Invalid signatures object in server response pdu.", ))?; - // We go through all the signatures we see on the value and fetch the corresponding signing - // keys + // We go through all the signatures we see on the value and fetch the + // corresponding signing keys for (signature_server, signature) in signatures { - let signature_object = signature.as_object().ok_or(Error::BadServerResponse( - "Invalid signatures content object in server response pdu.", - ))?; + let signature_object = + signature.as_object().ok_or(Error::BadServerResponse( + "Invalid signatures content object in server response pdu.", + ))?; - let signature_ids = signature_object.keys().cloned().collect::>(); + let signature_ids = + signature_object.keys().cloned().collect::>(); let fetch_res = self .fetch_signing_keys( signature_server.as_str().try_into().map_err(|_| { Error::BadServerResponse( - "Invalid servername in signatures of server response pdu.", + "Invalid servername in signatures of server \ + response pdu.", ) })?, signature_ids, + true, ) .await; - let keys = match fetch_res { - Ok(keys) => keys, - Err(_) => { - warn!("Signature verification failed: Could not fetch signing key.",); - continue; - } + let Ok(keys) = fetch_res else { + warn!("Failed to fetch signing key"); + continue; }; - pub_key_map - .write() - .await - .insert(signature_server.clone(), keys); + 
pub_key_map.write().await.insert(signature_server.clone(), keys); } Ok(()) } - // Gets a list of servers for which we don't have the signing key yet. We go over - // the PDUs and either cache the key or add it to the list that needs to be retrieved. + // Gets a list of servers for which we don't have the signing key yet. We go + // over the PDUs and either cache the key or add it to the list that + // needs to be retrieved. + #[tracing::instrument(skip_all)] async fn get_server_keys_from_cache( &self, pdu: &RawJsonValue, - servers: &mut BTreeMap>, - room_version: &RoomVersionId, - pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>>, + servers: &mut BTreeMap< + OwnedServerName, + BTreeMap, + >, + room_version_rules: &RoomVersionRules, + pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>, ) -> Result<()> { - let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { - error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); - Error::BadServerResponse("Invalid PDU in server response") - })?; + let value: CanonicalJsonObject = serde_json::from_str(pdu.get()) + .map_err(|error| { + error!(%error, ?pdu, "Invalid PDU in server response"); + Error::BadServerResponse("Invalid PDU in server response") + })?; let event_id = format!( "${}", - ruma::signatures::reference_hash(&value, room_version) + ruma::signatures::reference_hash(&value, room_version_rules) .expect("ruma can calculate reference hashes") ); let event_id = <&EventId>::try_from(event_id.as_str()) .expect("ruma's reference hashes are valid event ids"); - if let Some((time, tries)) = services() - .globals - .bad_event_ratelimiter - .read() - .await - .get(event_id) + let ExtractOriginServerTs { + origin_server_ts, + } = ExtractOriginServerTs::deserialize(pdu).map_err(|_| { + Error::BadServerResponse( + "Invalid PDU in server response, origin_server_ts field is \ + missing or invalid", + ) + })?; + + if let Some((time, tries)) = + 
services().globals.bad_event_ratelimiter.read().await.get(event_id) { // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); + let mut min_elapsed_duration = + Duration::from_secs(30) * (*tries) * (*tries); if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { min_elapsed_duration = Duration::from_secs(60 * 60 * 24); } if time.elapsed() < min_elapsed_duration { - debug!("Backing off from {}", event_id); - return Err(Error::BadServerResponse("bad event, still backing off")); + debug!(%event_id, "Backing off from event"); + return Err(Error::BadServerResponse( + "Bad event, still backing off", + )); } } @@ -1479,49 +1587,68 @@ impl Service { ))?; for (signature_server, signature) in signatures { - let signature_object = signature.as_object().ok_or(Error::BadServerResponse( - "Invalid signatures content object in server response pdu.", - ))?; + let signature_object = + signature.as_object().ok_or(Error::BadServerResponse( + "Invalid signatures content object in server response pdu.", + ))?; - let signature_ids = signature_object.keys().cloned().collect::>(); + let signature_ids = + signature_object.keys().cloned().collect::>(); - let contains_all_ids = |keys: &BTreeMap| { - signature_ids.iter().all(|id| keys.contains_key(id)) + let contains_all_ids = |keys: &SigningKeys| { + signature_ids.iter().all(|id| { + keys.verify_keys.get(id).is_some_and(|_| { + keys.valid_until_ts >= origin_server_ts + }) || keys + .old_verify_keys + .get(id) + .is_some_and(|v| v.expired_ts >= origin_server_ts) + }) }; - let origin = <&ServerName>::try_from(signature_server.as_str()).map_err(|_| { - Error::BadServerResponse("Invalid servername in signatures of server response pdu.") - })?; + let origin = <&ServerName>::try_from(signature_server.as_str()) + .map_err(|_| { + Error::BadServerResponse( + "Invalid servername in signatures of server response \ + pdu.", + ) + })?; - if servers.contains_key(origin) || 
pub_key_map.contains_key(origin.as_str()) { + // check that we have the server in our list already, or + // all `signature_ids` are in pub_key_map + // if yes, we don't have to do anything + if servers.contains_key(origin) + || pub_key_map + .get(origin.as_str()) + .is_some_and(contains_all_ids) + { continue; } - trace!("Loading signing keys for {}", origin); + trace!(server = %origin, "Loading signing keys for other server"); - let result: BTreeMap<_, _> = services() - .globals - .signing_keys_for(origin)? - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(); - - if !contains_all_ids(&result) { - trace!("Signing key not loaded for {}", origin); + let result = services().globals.signing_keys_for(origin)?; + if !result.as_ref().is_some_and(contains_all_ids) { + trace!( + server = %origin, + "Signing key not loaded for server", + ); servers.insert(origin.to_owned(), BTreeMap::new()); } - - pub_key_map.insert(origin.to_string(), result); + if let Some(result) = result { + pub_key_map.insert(origin.to_string(), result); + } } Ok(()) } + #[allow(clippy::too_many_lines)] pub(crate) async fn fetch_join_signing_keys( &self, event: &create_join_event::v2::Response, - room_version: &RoomVersionId, - pub_key_map: &RwLock>>, + room_version_rules: &RoomVersionRules, + pub_key_map: &RwLock>, ) -> Result<()> { let mut servers: BTreeMap< OwnedServerName, @@ -1533,19 +1660,27 @@ impl Service { // Try to fetch keys, failure is okay // Servers we couldn't find in the cache will be added to `servers` - for pdu in &event.room_state.state { - let _ = self - .get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm) - .await; - } - for pdu in &event.room_state.auth_chain { - let _ = self - .get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm) - .await; + for pdu in event + .room_state + .state + .iter() + .chain(&event.room_state.auth_chain) + { + if let Err(error) = self + .get_server_keys_from_cache( + pdu, + &mut servers, + 
room_version_rules, + &mut pkm, + ) + .await + { + debug!(%error, "Failed to get server keys from cache"); + }; } drop(pkm); - } + }; if servers.is_empty() { info!("We had all keys locally"); @@ -1553,7 +1688,7 @@ impl Service { } for server in services().globals.trusted_servers() { - info!("Asking batch signing keys from trusted server {}", server); + info!(%server, "Asking batch signing keys from trusted server"); if let Ok(keys) = services() .sending .send_federation_request( @@ -1564,17 +1699,18 @@ impl Service { ) .await { - trace!("Got signing keys: {:?}", keys); + trace!(signing_keys = ?keys, "Got signing keys"); let mut pkm = pub_key_map.write().await; for k in keys.server_keys { let k = match k.deserialize() { Ok(key) => key, - Err(e) => { + Err(error) => { warn!( - "Received error {} while fetching keys from trusted server {}", - e, server + %error, + %server, + object = ?k.json(), + "Failed to fetch keys from trusted server", ); - warn!("{}", k.into_json()); continue; } }; @@ -1584,10 +1720,10 @@ impl Service { let result = services() .globals - .add_signing_key(&k.server_name, k.clone())? 
- .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect::>(); + .add_signing_key_from_trusted_server( + &k.server_name, + k.clone(), + )?; pkm.insert(k.server_name.to_string(), result); } @@ -1599,14 +1735,17 @@ impl Service { } } - info!("Asking individual servers for signing keys: {servers:?}"); + info!(?servers, "Asking individual servers for signing keys"); let mut futures: FuturesUnordered<_> = servers .into_keys() .map(|server| async move { ( services() .sending - .send_federation_request(&server, get_server_keys::v2::Request::new()) + .send_federation_request( + &server, + get_server_keys::v2::Request::new(), + ) .await, server, ) @@ -1614,17 +1753,16 @@ impl Service { .collect(); while let Some(result) = futures.next().await { - info!("Received new result"); if let (Ok(get_keys_response), origin) = result { - info!("Result is from {origin}"); + info!(server = %origin, "Received new result from server"); if let Ok(key) = get_keys_response.server_key.deserialize() { - let result: BTreeMap<_, _> = services() + let result = services() .globals - .add_signing_key(&origin, key)? - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(); - pub_key_map.write().await.insert(origin.to_string(), result); + .add_signing_key_from_origin(&origin, key)?; + pub_key_map + .write() + .await + .insert(origin.to_string(), result); } } info!("Done handling result"); @@ -1636,24 +1774,33 @@ impl Service { } /// Returns Ok if the acl allows the server - pub fn acl_check(&self, server_name: &ServerName, room_id: &RoomId) -> Result<()> { - let acl_event = match services().rooms.state_accessor.room_state_get( + // Allowed because this function uses `services()` + #[allow(clippy::unused_self)] + #[tracing::instrument(skip_all)] + pub(crate) fn acl_check( + &self, + server_name: &ServerName, + room_id: &RoomId, + ) -> Result<()> { + let Some(acl_event) = services().rooms.state_accessor.room_state_get( room_id, &StateEventType::RoomServerAcl, "", - )? 
{ - Some(acl) => acl, - None => return Ok(()), + )? + else { + return Ok(()); }; - let acl_event_content: RoomServerAclEventContent = - match serde_json::from_str(acl_event.content.get()) { - Ok(content) => content, - Err(_) => { - warn!("Invalid ACL event"); - return Ok(()); - } - }; + let acl_event_content = match serde_json::from_str::< + RoomServerAclEventContent, + >(acl_event.content.get()) + { + Ok(x) => x, + Err(error) => { + warn!(%error, "Invalid ACL event"); + return Ok(()); + } + }; if acl_event_content.allow.is_empty() { // Ignore broken acl events @@ -1664,49 +1811,59 @@ impl Service { Ok(()) } else { info!( - "Server {} was denied by room ACL in {}", - server_name, room_id + server = %server_name, + %room_id, + "Other server was denied by room ACL", ); Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Server was denied by room ACL", )) } } - /// Search the DB for the signing keys of the given server, if we don't have them - /// fetch them from the server and save to our DB. - #[tracing::instrument(skip_all)] - pub async fn fetch_signing_keys( + /// Search the DB for the signing keys of the given server, if we don't have + /// them fetch them from the server and save to our DB. + #[allow(clippy::too_many_lines)] + #[tracing::instrument( + skip(self, signature_ids), + fields(signature_ids = debug_slice_truncated(&signature_ids, 3)) + )] + pub(crate) async fn fetch_signing_keys( &self, origin: &ServerName, signature_ids: Vec, - ) -> Result> { - let contains_all_ids = - |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); + // Whether to ask for keys from trusted servers. 
Should be false when + // getting keys for validating requests, as per MSC4029 + query_via_trusted_servers: bool, + ) -> Result { + let contains_all_ids = |keys: &SigningKeys| { + for id in &signature_ids { + let in_verify_keys = keys + .verify_keys + .keys() + .map(ToString::to_string) + .any(|key_id| id == &key_id); + let in_old_verify_keys = keys + .old_verify_keys + .keys() + .map(ToString::to_string) + .any(|key_id| id == &key_id); + + if !in_verify_keys && !in_old_verify_keys { + trace!(id, "signature key not yet in known set"); + return false; + } + } + true + }; let permit = services() .globals .servername_ratelimiter - .read() - .await - .get(origin) - .map(|s| Arc::clone(s).acquire_owned()); - - let permit = match permit { - Some(p) => p, - None => { - let mut write = services().globals.servername_ratelimiter.write().await; - let s = Arc::clone( - write - .entry(origin.to_owned()) - .or_insert_with(|| Arc::new(Semaphore::new(1))), - ); - - s.acquire_owned() - } - } - .await; + .get_or_insert_with(origin.to_owned(), || Semaphore::new(1)) + .await; + let permit = permit.acquire().await; let back_off = |id| async { match services() @@ -1719,7 +1876,9 @@ impl Service { hash_map::Entry::Vacant(e) => { e.insert((Instant::now(), 1)); } - hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), + hash_map::Entry::Occupied(mut e) => { + *e.get_mut() = (Instant::now(), e.get().1 + 1); + } } }; @@ -1731,120 +1890,236 @@ impl Service { .get(&signature_ids) { // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); + let mut min_elapsed_duration = + Duration::from_secs(30) * (*tries) * (*tries); if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { min_elapsed_duration = Duration::from_secs(60 * 60 * 24); } if time.elapsed() < min_elapsed_duration { - debug!("Backing off from {:?}", signature_ids); - return Err(Error::BadServerResponse("bad signature, still backing off")); + 
debug!(?signature_ids, "Backing off from signatures"); + return Err(Error::BadServerResponse( + "bad signature, still backing off", + )); } } - trace!("Loading signing keys for {}", origin); + trace!("Loading signing keys from database"); - let mut result: BTreeMap<_, _> = services() - .globals - .signing_keys_for(origin)? - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(); + let result = services().globals.signing_keys_for(origin)?; - if contains_all_ids(&result) { - return Ok(result); + let mut expires_soon_or_has_expired = false; + + if let Some(result) = result.clone() { + let ts_threshold = MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + Duration::from_secs(30 * 60), + ) + .expect("Should be valid until year 500,000,000"); + + debug!( + server = %origin, + key_ids = ?result.verify_keys.keys().collect::>(), + old_key_ids = + ?result.old_verify_keys.keys().collect::>(), + ts_threshold = %ts_threshold.get(), + ts_valid_until = %result.valid_until_ts.get(), + "Loaded signing keys for server", + ); + + if contains_all_ids(&result) { + // We want to ensure that the keys remain valid by the time the + // other functions that handle signatures reach them + if result.valid_until_ts > ts_threshold { + debug!( + origin = %origin, + valid_until_ts = %result.valid_until_ts.get(), + "Keys are valid because they expire after threshold", + ); + return Ok(result); + } + + expires_soon_or_has_expired = true; + trace!("Found all keys, but they will expire too soon"); + } } - debug!("Fetching signing keys for {} over federation", origin); + let mut keys = result.unwrap_or_else(|| SigningKeys { + verify_keys: BTreeMap::new(), + old_verify_keys: BTreeMap::new(), + valid_until_ts: MilliSecondsSinceUnixEpoch::now(), + }); - if let Some(server_key) = services() + // We want to set this to the max, and then lower it whenever we see + // older keys + keys.valid_until_ts = MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + 
Duration::from_secs(7 * 86400), + ) + .expect("Should be valid until year 500,000,000"); + + debug!("Fetching signing keys over federation"); + + if let Some(mut server_key) = services() .sending - .send_federation_request(origin, get_server_keys::v2::Request::new()) + .send_federation_request( + origin, + get_server_keys::v2::Request::new(), + ) .await .ok() .and_then(|resp| resp.server_key.deserialize().ok()) { + // Keys should only be valid for a maximum of seven days + server_key.valid_until_ts = server_key.valid_until_ts.min( + MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + Duration::from_secs(7 * 86400), + ) + .expect("Should be valid until year 500,000,000"), + ); + services() .globals - .add_signing_key(origin, server_key.clone())?; + .add_signing_key_from_origin(origin, server_key.clone())?; - result.extend( + if keys.valid_until_ts > server_key.valid_until_ts { + keys.valid_until_ts = server_key.valid_until_ts; + } + + keys.verify_keys.extend( server_key .verify_keys .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), + .map(|(id, key)| (id.to_string(), key)), ); - result.extend( + keys.old_verify_keys.extend( server_key .old_verify_keys .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), + .map(|(id, key)| (id.to_string(), key)), ); - if contains_all_ids(&result) { - return Ok(result); + if contains_all_ids(&keys) { + return Ok(keys); } } - for server in services().globals.trusted_servers() { - debug!("Asking {} for {}'s signing key", server, origin); - if let Some(server_keys) = services() - .sending - .send_federation_request( - server, - get_remote_server_keys::v2::Request::new( - origin.to_owned(), - MilliSecondsSinceUnixEpoch::from_system_time( - SystemTime::now() - .checked_add(Duration::from_secs(3600)) - .expect("SystemTime to large"), - ) - .expect("time is valid"), - ), - ) - .await - .ok() - .map(|resp| { - resp.server_keys - .into_iter() - .filter_map(|e| e.deserialize().ok()) - .collect::>() - }) - { - trace!("Got 
signing keys: {:?}", server_keys); - for k in server_keys { - services().globals.add_signing_key(origin, k.clone())?; - result.extend( - k.verify_keys + if query_via_trusted_servers { + for server in services().globals.trusted_servers() { + debug!( + trusted_server = %server, + origin = %origin, + "Asking trusted server for signing keys", + ); + if let Some(server_keys) = services() + .sending + .send_federation_request( + server, + get_remote_server_keys::v2::Request::new( + origin.to_owned(), + MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + .checked_add(Duration::from_secs(3600)) + .expect("SystemTime to large"), + ) + .expect("time is valid"), + ), + ) + .await + .ok() + .map(|resp| { + resp.server_keys .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), + .filter_map(|e| e.deserialize().ok()) + .collect::>() + }) + { + trace!( + ?server_keys, + "Got signing keys from trusted server" ); - result.extend( - k.old_verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - } + for mut k in server_keys { + // Half an hour should give plenty of time for the + // server to respond with keys that are still + // valid, given we requested keys which are valid at + // least an hour from now + let in_half_hour = + MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + + Duration::from_secs(30 * 60), + ) + .expect("Should be valid until year 500,000,000"); + if k.valid_until_ts < in_half_hour { + // Keys should only be valid for a maximum of seven + // days + k.valid_until_ts = k.valid_until_ts.min( + MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + + Duration::from_secs(7 * 86400), + ) + .expect( + "Should be valid until year 500,000,000", + ), + ); - if contains_all_ids(&result) { - return Ok(result); + if keys.valid_until_ts > k.valid_until_ts { + keys.valid_until_ts = k.valid_until_ts; + } + + services() + .globals + .add_signing_key_from_trusted_server( + origin, + k.clone(), + )?; + 
keys.verify_keys.extend( + k.verify_keys + .into_iter() + .map(|(id, key)| (id.to_string(), key)), + ); + keys.old_verify_keys.extend( + k.old_verify_keys + .into_iter() + .map(|(id, key)| (id.to_string(), key)), + ); + } else { + warn!( + origin = %origin, + valid_until = %k.valid_until_ts.get(), + "Server gave us keys older than we \ + requested", + ); + } + + if contains_all_ids(&keys) { + return Ok(keys); + } + } } } } + // We should return these keys if fresher keys were not found + if expires_soon_or_has_expired { + info!(origin = %origin, "Returning stale keys"); + return Ok(keys); + } + drop(permit); back_off(signature_ids).await; - warn!("Failed to find public key for server: {}", origin); - Err(Error::BadServerResponse( - "Failed to find public key for server", - )) + warn!("Failed to find all public keys"); + Err(Error::BadServerResponse("Failed to find public key for server")) } - fn check_room_id(&self, room_id: &RoomId, pdu: &PduEvent) -> Result<()> { + #[tracing::instrument(skip_all)] + fn check_room_id(room_id: &RoomId, pdu: &PduEvent) -> Result<()> { if pdu.room_id != room_id { - warn!("Found event from room {} in room {}", pdu.room_id, room_id); + warn!( + event_id = %pdu.event_id, + expected_room_id = %pdu.room_id, + actual_room_id = %room_id, + "Event has wrong room ID", + ); return Err(Error::BadRequest( ErrorKind::InvalidParam, "Event has wrong room id", diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading.rs similarity index 72% rename from src/service/rooms/lazy_loading/mod.rs rename to src/service/rooms/lazy_loading.rs index e2594a0a..4c0508f1 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading.rs @@ -1,37 +1,48 @@ -mod data; use std::collections::{HashMap, HashSet}; -pub use data::Data; use ruma::{DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, RoomId, UserId}; use tokio::sync::Mutex; +use super::timeline::PduCount; use crate::Result; -use super::timeline::PduCount; +mod 
data; -pub struct Service { - pub db: &'static dyn Data, +pub(crate) use data::Data; + +pub(crate) struct Service { + db: &'static dyn Data, #[allow(clippy::type_complexity)] - pub lazy_load_waiting: - Mutex>>, + lazy_load_waiting: Mutex< + HashMap< + (OwnedUserId, OwnedDeviceId, OwnedRoomId, PduCount), + HashSet, + >, + >, } impl Service { + pub(crate) fn new(db: &'static dyn Data) -> Self { + Self { + db, + lazy_load_waiting: Mutex::new(HashMap::new()), + } + } + #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( + pub(crate) fn lazy_load_was_sent_before( &self, user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, ll_user: &UserId, ) -> Result { - self.db - .lazy_load_was_sent_before(user_id, device_id, room_id, ll_user) + self.db.lazy_load_was_sent_before(user_id, device_id, room_id, ll_user) } #[tracing::instrument(skip(self))] - pub async fn lazy_load_mark_sent( + pub(crate) async fn lazy_load_mark_sent( &self, user_id: &UserId, device_id: &DeviceId, @@ -51,7 +62,7 @@ impl Service { } #[tracing::instrument(skip(self))] - pub async fn lazy_load_confirm_delivery( + pub(crate) async fn lazy_load_confirm_delivery( &self, user_id: &UserId, device_id: &DeviceId, @@ -70,15 +81,13 @@ impl Service { room_id, &mut user_ids.iter().map(|u| &**u), )?; - } else { - // Ignore } Ok(()) } #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( + pub(crate) fn lazy_load_reset( &self, user_id: &UserId, device_id: &DeviceId, diff --git a/src/service/rooms/lazy_loading/data.rs b/src/service/rooms/lazy_loading/data.rs index 9af8e21b..95bf83d8 100644 --- a/src/service/rooms/lazy_loading/data.rs +++ b/src/service/rooms/lazy_loading/data.rs @@ -1,7 +1,8 @@ -use crate::Result; use ruma::{DeviceId, RoomId, UserId}; -pub trait Data: Send + Sync { +use crate::Result; + +pub(crate) trait Data: Send + Sync { fn lazy_load_was_sent_before( &self, user_id: &UserId, diff --git a/src/service/rooms/metadata.rs b/src/service/rooms/metadata.rs new file mode 100644 
index 00000000..4f4b4344 --- /dev/null +++ b/src/service/rooms/metadata.rs @@ -0,0 +1,5 @@ +mod data; + +pub(crate) use data::Data; + +pub(crate) type Service = &'static dyn Data; diff --git a/src/service/rooms/metadata/data.rs b/src/service/rooms/metadata/data.rs index 339db573..81dd44ff 100644 --- a/src/service/rooms/metadata/data.rs +++ b/src/service/rooms/metadata/data.rs @@ -1,9 +1,13 @@ -use crate::Result; use ruma::{OwnedRoomId, RoomId}; -pub trait Data: Send + Sync { +use crate::Result; + +pub(crate) trait Data: Send + Sync { + /// Checks if a room exists. fn exists(&self, room_id: &RoomId) -> Result; - fn iter_ids<'a>(&'a self) -> Box> + 'a>; + fn iter_ids<'a>( + &'a self, + ) -> Box> + 'a>; fn is_disabled(&self, room_id: &RoomId) -> Result; fn disable_room(&self, room_id: &RoomId, disabled: bool) -> Result<()>; } diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs deleted file mode 100644 index d1884691..00000000 --- a/src/service/rooms/metadata/mod.rs +++ /dev/null @@ -1,30 +0,0 @@ -mod data; - -pub use data::Data; -use ruma::{OwnedRoomId, RoomId}; - -use crate::Result; - -pub struct Service { - pub db: &'static dyn Data, -} - -impl Service { - /// Checks if a room exists. 
- #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - self.db.exists(room_id) - } - - pub fn iter_ids<'a>(&'a self) -> Box> + 'a> { - self.db.iter_ids() - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - self.db.is_disabled(room_id) - } - - pub fn disable_room(&self, room_id: &RoomId, disabled: bool) -> Result<()> { - self.db.disable_room(room_id, disabled) - } -} diff --git a/src/service/rooms/mod.rs b/src/service/rooms/mod.rs deleted file mode 100644 index f0739841..00000000 --- a/src/service/rooms/mod.rs +++ /dev/null @@ -1,62 +0,0 @@ -pub mod alias; -pub mod auth_chain; -pub mod directory; -pub mod edus; -pub mod event_handler; -pub mod lazy_loading; -pub mod metadata; -pub mod outlier; -pub mod pdu_metadata; -pub mod search; -pub mod short; -pub mod spaces; -pub mod state; -pub mod state_accessor; -pub mod state_cache; -pub mod state_compressor; -pub mod threads; -pub mod timeline; -pub mod user; - -pub trait Data: - alias::Data - + auth_chain::Data - + directory::Data - + edus::Data - + lazy_loading::Data - + metadata::Data - + outlier::Data - + pdu_metadata::Data - + search::Data - + short::Data - + state::Data - + state_accessor::Data - + state_cache::Data - + state_compressor::Data - + timeline::Data - + threads::Data - + user::Data -{ -} - -pub struct Service { - pub alias: alias::Service, - pub auth_chain: auth_chain::Service, - pub directory: directory::Service, - pub edus: edus::Service, - pub event_handler: event_handler::Service, - pub lazy_loading: lazy_loading::Service, - pub metadata: metadata::Service, - pub outlier: outlier::Service, - pub pdu_metadata: pdu_metadata::Service, - pub search: search::Service, - pub short: short::Service, - pub state: state::Service, - pub state_accessor: state_accessor::Service, - pub state_cache: state_cache::Service, - pub state_compressor: state_compressor::Service, - pub timeline: timeline::Service, - pub threads: threads::Service, - pub spaces: 
spaces::Service, - pub user: user::Service, -} diff --git a/src/service/rooms/outlier.rs b/src/service/rooms/outlier.rs new file mode 100644 index 00000000..4f4b4344 --- /dev/null +++ b/src/service/rooms/outlier.rs @@ -0,0 +1,5 @@ +mod data; + +pub(crate) use data::Data; + +pub(crate) type Service = &'static dyn Data; diff --git a/src/service/rooms/outlier/data.rs b/src/service/rooms/outlier/data.rs index 0ed521dd..5722648b 100644 --- a/src/service/rooms/outlier/data.rs +++ b/src/service/rooms/outlier/data.rs @@ -1,9 +1,18 @@ use ruma::{CanonicalJsonObject, EventId}; -use crate::{PduEvent, Result}; +use crate::Result; -pub trait Data: Send + Sync { - fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result>; - fn get_outlier_pdu(&self, event_id: &EventId) -> Result>; - fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()>; +pub(crate) trait Data: Send + Sync { + /// Returns the pdu from the outlier tree. + fn get_outlier_pdu_json( + &self, + event_id: &EventId, + ) -> Result>; + + /// Append the PDU as an outlier. + fn add_pdu_outlier( + &self, + event_id: &EventId, + pdu: &CanonicalJsonObject, + ) -> Result<()>; } diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs deleted file mode 100644 index dae41e4b..00000000 --- a/src/service/rooms/outlier/mod.rs +++ /dev/null @@ -1,28 +0,0 @@ -mod data; - -pub use data::Data; -use ruma::{CanonicalJsonObject, EventId}; - -use crate::{PduEvent, Result}; - -pub struct Service { - pub db: &'static dyn Data, -} - -impl Service { - /// Returns the pdu from the outlier tree. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.db.get_outlier_pdu_json(event_id) - } - - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.db.get_outlier_pdu(event_id) - } - - /// Append the PDU as an outlier. 
- #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.db.add_pdu_outlier(event_id, pdu) - } -} diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata.rs similarity index 52% rename from src/service/rooms/pdu_metadata/mod.rs rename to src/service/rooms/pdu_metadata.rs index 411f4f54..92e0a458 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata.rs @@ -1,7 +1,5 @@ -mod data; use std::sync::Arc; -pub use data::Data; use ruma::{ api::client::relations::get_relating_events, events::{relation::RelationType, TimelineEventType}, @@ -9,12 +7,15 @@ use ruma::{ }; use serde::Deserialize; +use super::timeline::PduCount; use crate::{services, PduEvent, Result}; -use super::timeline::PduCount; +mod data; -pub struct Service { - pub db: &'static dyn Data, +pub(crate) use data::Data; + +pub(crate) struct Service { + pub(crate) db: &'static dyn Data, } #[derive(Clone, Debug, Deserialize)] @@ -29,9 +30,15 @@ struct ExtractRelatesToEventId { impl Service { #[tracing::instrument(skip(self, from, to))] - pub fn add_relation(&self, from: PduCount, to: PduCount) -> Result<()> { + pub(crate) fn add_relation( + &self, + from: PduCount, + to: PduCount, + ) -> Result<()> { match (from, to) { - (PduCount::Normal(f), PduCount::Normal(t)) => self.db.add_relation(f, t), + (PduCount::Normal(f), PduCount::Normal(t)) => { + self.db.add_relation(f, t) + } _ => { // TODO: Relations with backfilled pdus @@ -40,14 +47,15 @@ impl Service { } } - #[allow(clippy::too_many_arguments)] - pub fn paginate_relations_with_filter( + #[allow(clippy::too_many_arguments, clippy::too_many_lines)] + #[tracing::instrument(skip(self))] + pub(crate) fn paginate_relations_with_filter( &self, sender_user: &UserId, room_id: &RoomId, target: &EventId, - filter_event_type: Option, - filter_rel_type: Option, + filter_event_type: Option<&TimelineEventType>, + filter_rel_type: 
Option<&RelationType>, from: PduCount, to: Option, limit: usize, @@ -57,43 +65,50 @@ impl Service { //TODO: Fix ruma: match body.dir { match ruma::api::Direction::Backward { ruma::api::Direction::Forward => { - let events_after: Vec<_> = services() - .rooms - .pdu_metadata - .relations_until(sender_user, room_id, target, from)? // TODO: should be relations_after + // TODO: should be relations_after + let events_after: Vec<_> = self + .relations_until(sender_user, room_id, target, from)? .filter(|r| { r.as_ref().map_or(true, |(_, pdu)| { - filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t) - && if let Ok(content) = - serde_json::from_str::( - pdu.content.get(), - ) - { - filter_rel_type - .as_ref() - .map_or(true, |r| &content.relates_to.rel_type == r) + filter_event_type + .as_ref() + .is_none_or(|t| &&pdu.kind == t) + && if let Ok(content) = serde_json::from_str::< + ExtractRelatesToEventId, + >( + pdu.content.get() + ) { + filter_rel_type.as_ref().is_none_or(|r| { + &&content.relates_to.rel_type == r + }) } else { false } }) }) .take(limit) - .filter_map(|r| r.ok()) // Filter out buggy events + .filter_map(Result::ok) .filter(|(_, pdu)| { services() .rooms .state_accessor - .user_can_see_event(sender_user, room_id, &pdu.event_id) + .user_can_see_event( + sender_user, + room_id, + &pdu.event_id, + ) .unwrap_or(false) }) - .take_while(|&(k, _)| Some(k) != to) // Stop at `to` + .take_while(|&(k, _)| Some(k) != to) .collect(); - next_token = events_after.last().map(|(count, _)| count).copied(); + next_token = + events_after.last().map(|(count, _)| count).copied(); + // Reversed because relations are always most recent first let events_after: Vec<_> = events_after .into_iter() - .rev() // relations are always most recent first + .rev() .map(|(_, pdu)| pdu.to_message_like_event()) .collect(); @@ -101,42 +116,49 @@ impl Service { chunk: events_after, next_batch: next_token.map(|t| t.stringify()), prev_batch: Some(from.stringify()), + // TODO + recursion_depth: 
None, }) } ruma::api::Direction::Backward => { - let events_before: Vec<_> = services() - .rooms - .pdu_metadata + let events_before: Vec<_> = self .relations_until(sender_user, room_id, target, from)? .filter(|r| { r.as_ref().map_or(true, |(_, pdu)| { - filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t) - && if let Ok(content) = - serde_json::from_str::( - pdu.content.get(), - ) - { - filter_rel_type - .as_ref() - .map_or(true, |r| &content.relates_to.rel_type == r) + filter_event_type + .as_ref() + .is_none_or(|t| &&pdu.kind == t) + && if let Ok(content) = serde_json::from_str::< + ExtractRelatesToEventId, + >( + pdu.content.get() + ) { + filter_rel_type.as_ref().is_none_or(|r| { + &&content.relates_to.rel_type == r + }) } else { false } }) }) .take(limit) - .filter_map(|r| r.ok()) // Filter out buggy events + .filter_map(Result::ok) .filter(|(_, pdu)| { services() .rooms .state_accessor - .user_can_see_event(sender_user, room_id, &pdu.event_id) + .user_can_see_event( + sender_user, + room_id, + &pdu.event_id, + ) .unwrap_or(false) }) - .take_while(|&(k, _)| Some(k) != to) // Stop at `to` + .take_while(|&(k, _)| Some(k) != to) .collect(); - next_token = events_before.last().map(|(count, _)| count).copied(); + next_token = + events_before.last().map(|(count, _)| count).copied(); let events_before: Vec<_> = events_before .into_iter() @@ -147,44 +169,63 @@ impl Service { chunk: events_before, next_batch: next_token.map(|t| t.stringify()), prev_batch: Some(from.stringify()), + // TODO + recursion_depth: None, }) } } } - pub fn relations_until<'a>( + #[tracing::instrument(skip_all)] + pub(crate) fn relations_until<'a>( &'a self, user_id: &'a UserId, room_id: &'a RoomId, target: &'a EventId, until: PduCount, ) -> Result> + 'a> { - let room_id = services().rooms.short.get_or_create_shortroomid(room_id)?; + let room_id = + services().rooms.short.get_or_create_shortroomid(room_id)?; let target = match services().rooms.timeline.get_pdu_count(target)? 
{ Some(PduCount::Normal(c)) => c, // TODO: Support backfilled relations - _ => 0, // This will result in an empty iterator + // This will result in an empty iterator + _ => 0, }; self.db.relations_until(user_id, room_id, target, until) } #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { + pub(crate) fn mark_as_referenced( + &self, + room_id: &RoomId, + event_ids: &[Arc], + ) -> Result<()> { self.db.mark_as_referenced(room_id, event_ids) } #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { + pub(crate) fn is_event_referenced( + &self, + room_id: &RoomId, + event_id: &EventId, + ) -> Result { self.db.is_event_referenced(room_id, event_id) } #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { + pub(crate) fn mark_event_soft_failed( + &self, + event_id: &EventId, + ) -> Result<()> { self.db.mark_event_soft_failed(event_id) } #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { + pub(crate) fn is_event_soft_failed( + &self, + event_id: &EventId, + ) -> Result { self.db.is_event_soft_failed(event_id) } } diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index a4df34cc..baab44a4 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -1,20 +1,32 @@ use std::sync::Arc; -use crate::{service::rooms::timeline::PduCount, PduEvent, Result}; use ruma::{EventId, RoomId, UserId}; -pub trait Data: Send + Sync { +use crate::{ + service::rooms::{short::ShortRoomId, timeline::PduCount}, + PduEvent, Result, +}; + +pub(crate) trait Data: Send + Sync { fn add_relation(&self, from: u64, to: u64) -> Result<()>; #[allow(clippy::type_complexity)] fn relations_until<'a>( &'a self, user_id: &'a UserId, - room_id: u64, + room_id: ShortRoomId, 
target: u64, until: PduCount, ) -> Result> + 'a>>; - fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()>; - fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result; + fn mark_as_referenced( + &self, + room_id: &RoomId, + event_ids: &[Arc], + ) -> Result<()>; + fn is_event_referenced( + &self, + room_id: &RoomId, + event_id: &EventId, + ) -> Result; fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()>; fn is_event_soft_failed(&self, event_id: &EventId) -> Result; } diff --git a/src/service/rooms/search.rs b/src/service/rooms/search.rs new file mode 100644 index 00000000..4f4b4344 --- /dev/null +++ b/src/service/rooms/search.rs @@ -0,0 +1,5 @@ +mod data; + +pub(crate) use data::Data; + +pub(crate) type Service = &'static dyn Data; diff --git a/src/service/rooms/search/data.rs b/src/service/rooms/search/data.rs index 7ea7e3d1..2372a9aa 100644 --- a/src/service/rooms/search/data.rs +++ b/src/service/rooms/search/data.rs @@ -1,13 +1,29 @@ -use crate::Result; use ruma::RoomId; -pub trait Data: Send + Sync { - fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>; +use crate::{ + service::rooms::{short::ShortRoomId, timeline::PduId}, + Result, +}; + +pub(crate) trait Data: Send + Sync { + fn index_pdu( + &self, + shortroomid: ShortRoomId, + pdu_id: &PduId, + message_body: &str, + ) -> Result<()>; + + fn deindex_pdu( + &self, + shortroomid: ShortRoomId, + pdu_id: &PduId, + message_body: &str, + ) -> Result<()>; #[allow(clippy::type_complexity)] fn search_pdus<'a>( &'a self, room_id: &RoomId, search_string: &str, - ) -> Result> + 'a>, Vec)>>; + ) -> Result + 'a>, Vec)>>; } diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs deleted file mode 100644 index b6f35e79..00000000 --- a/src/service/rooms/search/mod.rs +++ /dev/null @@ -1,26 +0,0 @@ -mod data; - -pub use data::Data; - -use crate::Result; -use ruma::RoomId; - -pub struct Service { - pub db: 
&'static dyn Data, -} - -impl Service { - #[tracing::instrument(skip(self))] - pub fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> { - self.db.index_pdu(shortroomid, pdu_id, message_body) - } - - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, - room_id: &RoomId, - search_string: &str, - ) -> Result> + 'a, Vec)>> { - self.db.search_pdus(room_id, search_string) - } -} diff --git a/src/service/rooms/short.rs b/src/service/rooms/short.rs new file mode 100644 index 00000000..ce4d207c --- /dev/null +++ b/src/service/rooms/short.rs @@ -0,0 +1,237 @@ +use std::sync::{Arc, Mutex}; + +use lru_cache::LruCache; +use ruma::{events::StateEventType, EventId, OwnedEventId, RoomId}; + +use crate::{ + observability::{FoundIn, Lookup, METRICS}, + utils::error::Result, +}; + +macro_rules! short_id_type { + ($name:ident) => { + #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] + #[repr(transparent)] + pub(crate) struct $name(u64); + + impl $name { + pub(crate) fn new(id: u64) -> Self { + Self(id) + } + + pub(crate) fn get(&self) -> u64 { + self.0 + } + } + }; +} + +short_id_type!(ShortRoomId); +short_id_type!(ShortEventId); +short_id_type!(ShortStateHash); +short_id_type!(ShortStateKey); + +mod data; + +pub(crate) use data::Data; + +pub(crate) struct Service { + db: &'static dyn Data, + shorteventid_cache: Option>>>, + eventidshort_cache: Option>>, + statekeyshort_cache: + Option>>, + shortstatekey_cache: + Option>>, +} + +impl Service { + pub(crate) fn new( + db: &'static dyn Data, + shorteventid_cache_size: usize, + eventidshort_cache_size: usize, + statekeyshort_cache_size: usize, + shortstatekey_cache_size: usize, + ) -> Self { + Self { + db, + shorteventid_cache: (shorteventid_cache_size > 0) + .then(|| Mutex::new(LruCache::new(shorteventid_cache_size))), + eventidshort_cache: (eventidshort_cache_size > 0) + .then(|| Mutex::new(LruCache::new(eventidshort_cache_size))), + statekeyshort_cache: 
(statekeyshort_cache_size > 0) + .then(|| Mutex::new(LruCache::new(statekeyshort_cache_size))), + shortstatekey_cache: (shortstatekey_cache_size > 0) + .then(|| Mutex::new(LruCache::new(shortstatekey_cache_size))), + } + } + + pub(crate) fn get_or_create_shorteventid( + &self, + event_id: &EventId, + ) -> Result { + let lookup = Lookup::CreateEventIdToShort; + + if let Some(cache) = &self.eventidshort_cache { + if let Some(short) = cache.lock().unwrap().get_mut(event_id) { + METRICS.record_lookup(lookup, FoundIn::Cache); + return Ok(*short); + } + } + + let (short, created) = self.db.get_or_create_shorteventid(event_id)?; + + if created { + METRICS.record_lookup(lookup, FoundIn::Nothing); + } else { + METRICS.record_lookup(lookup, FoundIn::Database); + } + + if let Some(cache) = &self.eventidshort_cache { + cache.lock().unwrap().insert(event_id.to_owned(), short); + } + + Ok(short) + } + + pub(crate) fn get_shortstatekey( + &self, + event_type: &StateEventType, + state_key: &str, + ) -> Result> { + let lookup = Lookup::StateKeyToShort; + + if let Some(cache) = &self.statekeyshort_cache { + if let Some(short) = cache + .lock() + .unwrap() + .get_mut(&(event_type.clone(), state_key.to_owned())) + { + METRICS.record_lookup(lookup, FoundIn::Cache); + return Ok(Some(*short)); + } + } + + let short = self.db.get_shortstatekey(event_type, state_key)?; + + if let Some(short) = short { + METRICS.record_lookup(lookup, FoundIn::Database); + + if let Some(cache) = &self.statekeyshort_cache { + cache + .lock() + .unwrap() + .insert((event_type.clone(), state_key.to_owned()), short); + } + } else { + METRICS.record_lookup(lookup, FoundIn::Nothing); + } + + Ok(short) + } + + pub(crate) fn get_or_create_shortstatekey( + &self, + event_type: &StateEventType, + state_key: &str, + ) -> Result { + let lookup = Lookup::CreateStateKeyToShort; + + if let Some(cache) = &self.statekeyshort_cache { + if let Some(short) = cache + .lock() + .unwrap() + .get_mut(&(event_type.clone(), 
state_key.to_owned())) + { + METRICS.record_lookup(lookup, FoundIn::Cache); + return Ok(*short); + } + } + + let (short, created) = + self.db.get_or_create_shortstatekey(event_type, state_key)?; + + if created { + METRICS.record_lookup(lookup, FoundIn::Nothing); + } else { + METRICS.record_lookup(lookup, FoundIn::Database); + } + + if let Some(cache) = &self.statekeyshort_cache { + cache + .lock() + .unwrap() + .insert((event_type.clone(), state_key.to_owned()), short); + } + + Ok(short) + } + + pub(crate) fn get_eventid_from_short( + &self, + shorteventid: ShortEventId, + ) -> Result> { + let lookup = Lookup::ShortToEventId; + + if let Some(cache) = &self.shorteventid_cache { + if let Some(id) = cache.lock().unwrap().get_mut(&shorteventid) { + METRICS.record_lookup(lookup, FoundIn::Cache); + return Ok(Arc::clone(id)); + } + } + let event_id = self.db.get_eventid_from_short(shorteventid)?; + + METRICS.record_lookup(lookup, FoundIn::Database); + + if let Some(cache) = &self.shorteventid_cache { + cache.lock().unwrap().insert(shorteventid, Arc::clone(&event_id)); + } + + Ok(event_id) + } + + pub(crate) fn get_statekey_from_short( + &self, + shortstatekey: ShortStateKey, + ) -> Result<(StateEventType, String)> { + let lookup = Lookup::ShortToStateKey; + + if let Some(cache) = &self.shortstatekey_cache { + if let Some(id) = cache.lock().unwrap().get_mut(&shortstatekey) { + METRICS.record_lookup(lookup, FoundIn::Cache); + return Ok(id.clone()); + } + } + let x = self.db.get_statekey_from_short(shortstatekey)?; + + METRICS.record_lookup(lookup, FoundIn::Database); + + if let Some(cache) = &self.shortstatekey_cache { + cache.lock().unwrap().insert(shortstatekey, x.clone()); + } + + Ok(x) + } + + /// Returns `(shortstatehash, already_existed)` + pub(crate) fn get_or_create_shortstatehash( + &self, + state_hash: &[u8], + ) -> Result<(ShortStateHash, bool)> { + self.db.get_or_create_shortstatehash(state_hash) + } + + pub(crate) fn get_shortroomid( + &self, + room_id: 
&RoomId, + ) -> Result> { + self.db.get_shortroomid(room_id) + } + + pub(crate) fn get_or_create_shortroomid( + &self, + room_id: &RoomId, + ) -> Result { + self.db.get_or_create_shortroomid(room_id) + } +} diff --git a/src/service/rooms/short/data.rs b/src/service/rooms/short/data.rs index 652c525b..b21588be 100644 --- a/src/service/rooms/short/data.rs +++ b/src/service/rooms/short/data.rs @@ -1,31 +1,50 @@ use std::sync::Arc; -use crate::Result; use ruma::{events::StateEventType, EventId, RoomId}; -pub trait Data: Send + Sync { - fn get_or_create_shorteventid(&self, event_id: &EventId) -> Result; +use super::{ShortEventId, ShortRoomId, ShortStateHash, ShortStateKey}; +use crate::Result; + +pub(crate) trait Data: Send + Sync { + /// The returned bool indicates whether it was created + fn get_or_create_shorteventid( + &self, + event_id: &EventId, + ) -> Result<(ShortEventId, bool)>; fn get_shortstatekey( &self, event_type: &StateEventType, state_key: &str, - ) -> Result>; + ) -> Result>; + /// The returned bool indicates whether it was created fn get_or_create_shortstatekey( &self, event_type: &StateEventType, state_key: &str, - ) -> Result; + ) -> Result<(ShortStateKey, bool)>; - fn get_eventid_from_short(&self, shorteventid: u64) -> Result>; + fn get_eventid_from_short( + &self, + shorteventid: ShortEventId, + ) -> Result>; - fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)>; + fn get_statekey_from_short( + &self, + shortstatekey: ShortStateKey, + ) -> Result<(StateEventType, String)>; - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> Result<(u64, bool)>; + /// Returns `(shortstatehash, already_existed)` + fn get_or_create_shortstatehash( + &self, + state_hash: &[u8], + ) -> Result<(ShortStateHash, bool)>; - fn get_shortroomid(&self, room_id: &RoomId) -> Result>; + fn get_shortroomid(&self, room_id: &RoomId) -> Result>; - fn get_or_create_shortroomid(&self, 
room_id: &RoomId) -> Result; + fn get_or_create_shortroomid( + &self, + room_id: &RoomId, + ) -> Result; } diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs deleted file mode 100644 index 45fadd74..00000000 --- a/src/service/rooms/short/mod.rs +++ /dev/null @@ -1,54 +0,0 @@ -mod data; -use std::sync::Arc; - -pub use data::Data; -use ruma::{events::StateEventType, EventId, RoomId}; - -use crate::Result; - -pub struct Service { - pub db: &'static dyn Data, -} - -impl Service { - pub fn get_or_create_shorteventid(&self, event_id: &EventId) -> Result { - self.db.get_or_create_shorteventid(event_id) - } - - pub fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result> { - self.db.get_shortstatekey(event_type, state_key) - } - - pub fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result { - self.db.get_or_create_shortstatekey(event_type, state_key) - } - - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - self.db.get_eventid_from_short(shorteventid) - } - - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - self.db.get_statekey_from_short(shortstatekey) - } - - /// Returns (shortstatehash, already_existed) - pub fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> Result<(u64, bool)> { - self.db.get_or_create_shortstatehash(state_hash) - } - - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.db.get_shortroomid(room_id) - } - - pub fn get_or_create_shortroomid(&self, room_id: &RoomId) -> Result { - self.db.get_or_create_shortroomid(room_id) - } -} diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces.rs similarity index 57% rename from src/service/rooms/spaces/mod.rs rename to src/service/rooms/spaces.rs index 981d4a37..d3c52b1d 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces.rs @@ -13,10 +13,13 @@ use ruma::{ room::{ 
avatar::RoomAvatarEventContent, canonical_alias::RoomCanonicalAliasEventContent, - create::RoomCreateEventContent, guest_access::{GuestAccess, RoomGuestAccessEventContent}, - history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, - join_rules::{self, AllowRule, JoinRule, RoomJoinRulesEventContent}, + history_visibility::{ + HistoryVisibility, RoomHistoryVisibilityEventContent, + }, + join_rules::{ + self, AllowRule, JoinRule, RoomJoinRulesEventContent, + }, topic::RoomTopicEventContent, }, space::child::SpaceChildEventContent, @@ -26,28 +29,38 @@ use ruma::{ OwnedRoomId, RoomId, UserId, }; use tokio::sync::Mutex; - use tracing::{debug, error, warn}; +use super::state::ExtractType; use crate::{services, Error, PduEvent, Result}; -pub enum CachedJoinRule { - //Simplified(SpaceRoomJoinRule), +pub(crate) enum CachedJoinRule { Full(JoinRule), } -pub struct CachedSpaceChunk { +pub(crate) struct CachedSpaceChunk { chunk: SpaceHierarchyRoomsChunk, children: Vec, join_rule: CachedJoinRule, } -pub struct Service { - pub roomid_spacechunk_cache: Mutex>>, +pub(crate) struct Service { + roomid_spacechunk_cache: + Option>>>, } impl Service { - pub async fn get_hierarchy( + pub(crate) fn new(roomid_spacechunk_cache_size: usize) -> Self { + Self { + roomid_spacechunk_cache: (roomid_spacechunk_cache_size > 0).then( + || Mutex::new(LruCache::new(roomid_spacechunk_cache_size)), + ), + } + } + + #[allow(clippy::too_many_lines)] + #[tracing::instrument(skip(self))] + pub(crate) async fn get_hierarchy( &self, sender_user: &UserId, room_id: &RoomId, @@ -63,13 +76,13 @@ impl Service { let mut results = Vec::new(); while let Some(current_room) = { - while stack.last().map_or(false, |s| s.is_empty()) { + while stack.last().is_some_and(Vec::is_empty) { stack.pop(); } - if !stack.is_empty() { - stack.last_mut().and_then(|s| s.pop()) - } else { + if stack.is_empty() { None + } else { + stack.last_mut().and_then(Vec::pop) } } { rooms_in_path.push(current_room.clone()); @@ 
-77,40 +90,34 @@ impl Service { break; } - if let Some(cached) = self - .roomid_spacechunk_cache - .lock() - .await - .get_mut(¤t_room.to_owned()) - .as_ref() - { - if let Some(cached) = cached { - let allowed = match &cached.join_rule { - //CachedJoinRule::Simplified(s) => { - //self.handle_simplified_join_rule(s, sender_user, ¤t_room)? - //} - CachedJoinRule::Full(f) => { - self.handle_join_rule(f, sender_user, ¤t_room)? - } - }; - if allowed { - if left_to_skip > 0 { - left_to_skip -= 1; - } else { - results.push(cached.chunk.clone()); - } - if rooms_in_path.len() < max_depth { - stack.push(cached.children.clone()); + if let Some(cache) = &self.roomid_spacechunk_cache { + if let Some(cached) = + cache.lock().await.get_mut(¤t_room.clone()).as_ref() + { + if let Some(cached) = cached { + let allowed = match &cached.join_rule { + CachedJoinRule::Full(f) => self.handle_join_rule( + f, + sender_user, + ¤t_room, + )?, + }; + if allowed { + if left_to_skip > 0 { + left_to_skip -= 1; + } else { + results.push(cached.chunk.clone()); + } + if rooms_in_path.len() < max_depth { + stack.push(cached.children.clone()); + } } } + continue; } - continue; } - - if let Some(current_shortstatehash) = services() - .rooms - .state - .get_room_shortstatehash(¤t_room)? + if let Some(current_shortstatehash) = + services().rooms.state.get_room_shortstatehash(¤t_room)? { let state = services() .rooms @@ -127,16 +134,21 @@ impl Service { continue; } - let pdu = services() - .rooms - .timeline - .get_pdu(&id)? 
- .ok_or_else(|| Error::bad_database("Event in space state not found"))?; + let pdu = + services().rooms.timeline.get_pdu(&id)?.ok_or_else( + || { + Error::bad_database( + "Event in space state not found", + ) + }, + )?; - if serde_json::from_str::(pdu.content.get()) - .ok() - .map(|c| c.via) - .map_or(true, |v| v.is_empty()) + if serde_json::from_str::( + pdu.content.get(), + ) + .ok() + .map(|c| c.via) + .is_none_or(|v| v.is_empty()) { continue; } @@ -150,7 +162,11 @@ impl Service { // TODO: Sort children children_ids.reverse(); - let chunk = self.get_room_chunk(sender_user, ¤t_room, children_pdus); + let chunk = self.get_room_chunk( + sender_user, + ¤t_room, + children_pdus, + ); if let Ok(chunk) = chunk { if left_to_skip > 0 { left_to_skip -= 1; @@ -160,26 +176,38 @@ impl Service { let join_rule = services() .rooms .state_accessor - .room_state_get(¤t_room, &StateEventType::RoomJoinRules, "")? + .room_state_get( + ¤t_room, + &StateEventType::RoomJoinRules, + "", + )? .map(|s| { serde_json::from_str(s.content.get()) .map(|c: RoomJoinRulesEventContent| c.join_rule) - .map_err(|e| { - error!("Invalid room join rule event in database: {}", e); - Error::BadDatabase("Invalid room join rule event in database.") + .map_err(|error| { + error!( + %error, + "Invalid room join rule event" + ); + Error::BadDatabase( + "Invalid room join rule event in \ + database.", + ) }) }) .transpose()? 
.unwrap_or(JoinRule::Invite); - self.roomid_spacechunk_cache.lock().await.insert( - current_room.clone(), - Some(CachedSpaceChunk { - chunk, - children: children_ids.clone(), - join_rule: CachedJoinRule::Full(join_rule), - }), - ); + if let Some(cache) = &self.roomid_spacechunk_cache { + cache.lock().await.insert( + current_room.clone(), + Some(CachedSpaceChunk { + chunk, + children: children_ids.clone(), + join_rule: CachedJoinRule::Full(join_rule), + }), + ); + } } if rooms_in_path.len() < max_depth { @@ -196,19 +224,23 @@ impl Service { // Early return so the client can see some data already break; } - debug!("Asking {server} for /hierarchy"); + debug!(%server, "Asking other server for /hierarchy"); if let Ok(response) = services() .sending .send_federation_request( server, federation::space::get_hierarchy::v1::Request { - room_id: current_room.to_owned(), + room_id: current_room.clone(), suggested_only, }, ) .await { - warn!("Got response from {server} for /hierarchy\n{response:?}"); + warn!( + %server, + ?response, + "Got response from other server for /hierarchy", + ); let chunk = SpaceHierarchyRoomsChunk { canonical_alias: response.room.canonical_alias, name: response.room.name, @@ -253,9 +285,17 @@ impl Service { }) } SpaceRoomJoinRule::Public => JoinRule::Public, - _ => return Err(Error::BadServerResponse("Unknown join rule")), + _ => { + return Err(Error::BadServerResponse( + "Unknown join rule", + )) + } }; - if self.handle_join_rule(&join_rule, sender_user, ¤t_room)? { + if self.handle_join_rule( + &join_rule, + sender_user, + ¤t_room, + )? 
{ if left_to_skip > 0 { left_to_skip -= 1; } else { @@ -266,32 +306,18 @@ impl Service { } } - self.roomid_spacechunk_cache.lock().await.insert( - current_room.clone(), - Some(CachedSpaceChunk { - chunk, - children, - join_rule: CachedJoinRule::Full(join_rule), - }), - ); - - /* TODO: - for child in response.children { - roomid_spacechunk_cache.insert( + if let Some(cache) = &self.roomid_spacechunk_cache { + cache.lock().await.insert( current_room.clone(), - CachedSpaceChunk { - chunk: child.chunk, + Some(CachedSpaceChunk { + chunk, children, - join_rule, - }, + join_rule: CachedJoinRule::Full(join_rule), + }), ); } - */ - } else { - self.roomid_spacechunk_cache - .lock() - .await - .insert(current_room.clone(), None); + } else if let Some(cache) = &self.roomid_spacechunk_cache { + cache.lock().await.insert(current_room.clone(), None); } } } @@ -306,6 +332,8 @@ impl Service { }) } + #[allow(clippy::too_many_lines)] + #[tracing::instrument(skip(self, children))] fn get_room_chunk( &self, sender_user: &UserId, @@ -316,12 +344,23 @@ impl Service { canonical_alias: services() .rooms .state_accessor - .room_state_get(room_id, &StateEventType::RoomCanonicalAlias, "")? + .room_state_get( + room_id, + &StateEventType::RoomCanonicalAlias, + "", + )? .map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomCanonicalAliasEventContent| c.alias) - .map_err(|_| { - Error::bad_database("Invalid canonical alias event in database.") + .map_err(|error| { + error!( + %error, + event_id = %s.event_id, + "Invalid room canonical alias event" + ); + Error::BadDatabase( + "Invalid canonical alias event in database.", + ) }) })?, name: services().rooms.state_accessor.get_name(room_id)?, @@ -330,7 +369,7 @@ impl Service { .state_cache .room_joined_count(room_id)? 
.unwrap_or_else(|| { - warn!("Room {} has no member count", room_id); + warn!("Room has no member count"); 0 }) .try_into() @@ -343,23 +382,40 @@ impl Service { .map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomTopicEventContent| Some(c.topic)) - .map_err(|_| { - error!("Invalid room topic event in database for room {}", room_id); - Error::bad_database("Invalid room topic event in database.") + .map_err(|error| { + error!( + %error, + event_id = %s.event_id, + "Invalid room topic event" + ); + Error::BadDatabase( + "Invalid room topic event in database.", + ) }) })?, world_readable: services() .rooms .state_accessor - .room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")? + .room_state_get( + room_id, + &StateEventType::RoomHistoryVisibility, + "", + )? .map_or(Ok(false), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomHistoryVisibilityEventContent| { - c.history_visibility == HistoryVisibility::WorldReadable + c.history_visibility + == HistoryVisibility::WorldReadable }) - .map_err(|_| { - Error::bad_database( - "Invalid room history visibility event in database.", + .map_err(|error| { + error!( + %error, + event_id = %s.event_id, + "Invalid room history visibility event" + ); + Error::BadDatabase( + "Invalid room history visibility event in \ + database.", ) }) })?, @@ -372,8 +428,15 @@ impl Service { .map(|c: RoomGuestAccessEventContent| { c.guest_access == GuestAccess::CanJoin }) - .map_err(|_| { - Error::bad_database("Invalid room guest access event in database.") + .map_err(|error| { + error!( + %error, + event_id = %s.event_id, + "Invalid room guest access event" + ); + Error::BadDatabase( + "Invalid room guest access event in database.", + ) }) })?, avatar_url: services() @@ -383,50 +446,61 @@ impl Service { .map(|s| { serde_json::from_str(s.content.get()) .map(|c: RoomAvatarEventContent| c.url) - .map_err(|_| Error::bad_database("Invalid room avatar event in database.")) + .map_err(|error| { + error!( + 
%error, + event_id = %s.event_id, + "Invalid room avatar event" + ); + Error::bad_database( + "Invalid room avatar event in database.", + ) + }) }) .transpose()? - // url is now an Option so we must flatten .flatten(), join_rule: { let join_rule = services() .rooms .state_accessor - .room_state_get(room_id, &StateEventType::RoomJoinRules, "")? + .room_state_get( + room_id, + &StateEventType::RoomJoinRules, + "", + )? .map(|s| { serde_json::from_str(s.content.get()) .map(|c: RoomJoinRulesEventContent| c.join_rule) - .map_err(|e| { - error!("Invalid room join rule event in database: {}", e); - Error::BadDatabase("Invalid room join rule event in database.") + .map_err(|error| { + error!( + %error, + event_id = %s.event_id, + "Invalid room join rule event", + ); + Error::BadDatabase( + "Invalid room join rule event in database.", + ) }) }) .transpose()? .unwrap_or(JoinRule::Invite); if !self.handle_join_rule(&join_rule, sender_user, room_id)? { - debug!("User is not allowed to see room {room_id}"); + debug!("User is not allowed to see room"); // This error will be caught later return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "User is not allowed to see the room", )); } - self.translate_joinrule(&join_rule)? + Self::translate_joinrule(&join_rule)? }, + room_type: services() .rooms - .state_accessor - .room_state_get(room_id, &StateEventType::RoomCreate, "")? - .map(|s| { - serde_json::from_str::(s.content.get()).map_err(|e| { - error!("Invalid room create event in database: {}", e); - Error::BadDatabase("Invalid room create event in database.") - }) - }) - .transpose()? 
- .and_then(|e| e.room_type), + .state + .get_create_content::(room_id)?, children_state: children .into_iter() .map(|pdu| pdu.to_stripped_spacechild_state_event()) @@ -434,18 +508,28 @@ impl Service { }) } - fn translate_joinrule(&self, join_rule: &JoinRule) -> Result { + pub(crate) async fn invalidate_cache(&self, room_id: &RoomId) { + if let Some(cache) = &self.roomid_spacechunk_cache { + cache.lock().await.remove(room_id); + } + } + + fn translate_joinrule(join_rule: &JoinRule) -> Result { match join_rule { JoinRule::Invite => Ok(SpaceRoomJoinRule::Invite), JoinRule::Knock => Ok(SpaceRoomJoinRule::Knock), JoinRule::Private => Ok(SpaceRoomJoinRule::Private), JoinRule::Restricted(_) => Ok(SpaceRoomJoinRule::Restricted), - JoinRule::KnockRestricted(_) => Ok(SpaceRoomJoinRule::KnockRestricted), + JoinRule::KnockRestricted(_) => { + Ok(SpaceRoomJoinRule::KnockRestricted) + } JoinRule::Public => Ok(SpaceRoomJoinRule::Public), _ => Err(Error::BadServerResponse("Unknown join rule")), } } + // Allowed because this function uses `services()` + #[allow(clippy::unused_self)] fn handle_simplified_join_rule( &self, join_rule: &SpaceRoomJoinRule, @@ -453,18 +537,17 @@ impl Service { room_id: &RoomId, ) -> Result { let allowed = match join_rule { - SpaceRoomJoinRule::Public => true, - SpaceRoomJoinRule::Knock => true, - SpaceRoomJoinRule::Invite => services() - .rooms - .state_cache - .is_joined(sender_user, room_id)?, + SpaceRoomJoinRule::Knock | SpaceRoomJoinRule::Public => true, + SpaceRoomJoinRule::Invite => { + services().rooms.state_cache.is_joined(sender_user, room_id)? + } _ => false, }; Ok(allowed) } + #[tracing::instrument(skip(self, sender_user))] fn handle_join_rule( &self, join_rule: &JoinRule, @@ -472,7 +555,7 @@ impl Service { room_id: &RoomId, ) -> Result { if self.handle_simplified_join_rule( - &self.translate_joinrule(join_rule)?, + &Self::translate_joinrule(join_rule)?, sender_user, room_id, )? 
{ @@ -482,7 +565,7 @@ impl Service { match join_rule { JoinRule::Restricted(r) => { for rule in &r.allow { - if let join_rules::AllowRule::RoomMembership(rm) = rule { + if let AllowRule::RoomMembership(rm) = rule { if let Ok(true) = services() .rooms .state_cache diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state.rs similarity index 53% rename from src/service/rooms/state/mod.rs rename to src/service/rooms/state.rs index f6581bb5..91a27049 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state.rs @@ -1,41 +1,99 @@ -mod data; use std::{ collections::{HashMap, HashSet}, + iter, sync::Arc, }; -pub use data::Data; use ruma::{ api::client::error::ErrorKind, events::{ - room::{create::RoomCreateEventContent, member::MembershipState}, + room::{create::PreviousRoom, member::MembershipState}, AnyStrippedStateEvent, StateEventType, TimelineEventType, }, + room::RoomType, + room_version_rules::AuthorizationRules, serde::Raw, state_res::{self, StateMap}, - EventId, OwnedEventId, RoomId, RoomVersionId, UserId, + EventId, OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, UserId, }; -use serde::Deserialize; -use tokio::sync::MutexGuard; +use serde::{de::DeserializeOwned, Deserialize}; use tracing::warn; -use crate::{services, utils::calculate_hash, Error, PduEvent, Result}; +use super::{short::ShortStateHash, state_compressor::CompressedStateEvent}; +use crate::{ + service::globals::marker, + services, + utils::{ + calculate_hash, debug_slice_truncated, on_demand_hashmap::KeyToken, + }, + Error, PduEvent, Result, +}; -use super::state_compressor::CompressedStateEvent; +mod data; -pub struct Service { - pub db: &'static dyn Data, +pub(crate) use data::Data; + +pub(crate) trait ExtractCreateContent: DeserializeOwned { + type Extract; + + fn extract(self) -> Self::Extract; +} + +/// Extract the `room_version` from an `m.room.create` event +#[derive(Deserialize)] +pub(crate) struct ExtractVersion { + room_version: RoomVersionId, +} + +impl 
ExtractCreateContent for ExtractVersion { + type Extract = RoomVersionId; + + fn extract(self) -> Self::Extract { + self.room_version + } +} + +/// Extract the `type` from an `m.room.create` event +#[derive(Deserialize)] +pub(crate) struct ExtractType { + #[serde(rename = "type")] + kind: Option, +} + +impl ExtractCreateContent for ExtractType { + type Extract = Option; + + fn extract(self) -> Self::Extract { + self.kind + } +} + +#[derive(Deserialize)] +pub(crate) struct ExtractPredecessor { + predecessor: Option, +} + +impl ExtractCreateContent for ExtractPredecessor { + type Extract = Option; + + fn extract(self) -> Self::Extract { + self.predecessor + } +} + +pub(crate) struct Service { + pub(crate) db: &'static dyn Data, } impl Service { /// Set the room to the given statehash and update caches. - pub async fn force_state( + #[tracing::instrument(skip(self, statediffnew, _statediffremoved))] + pub(crate) async fn force_state( &self, - room_id: &RoomId, - shortstatehash: u64, + room_id: &KeyToken, + shortstatehash: ShortStateHash, statediffnew: Arc>, _statediffremoved: Arc>, - state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()> { for event_id in statediffnew.iter().filter_map(|new| { services() @@ -45,13 +103,15 @@ impl Service { .ok() .map(|(_, id)| id) }) { - let pdu = match services().rooms.timeline.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, + let Some(pdu) = + services().rooms.timeline.get_pdu_json(&event_id)? 
+ else { + continue; }; let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), + &serde_json::to_string(&pdu) + .expect("CanonicalJsonObj can be serialized to JSON"), ) { Ok(pdu) => pdu, Err(_) => continue, @@ -65,25 +125,25 @@ impl Service { } let membership = - match serde_json::from_str::(pdu.content.get()) { + match serde_json::from_str::( + pdu.content.get(), + ) { Ok(e) => e.membership, Err(_) => continue, }; - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, + let Some(state_key) = pdu.state_key else { + continue; }; - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, + let Ok(user_id) = UserId::parse(state_key) else { + continue; }; services().rooms.state_cache.update_membership( room_id, &user_id, - membership, + &membership, &pdu.sender, None, false, @@ -93,19 +153,16 @@ impl Service { services() .rooms .spaces - .roomid_spacechunk_cache - .lock() - .await - .remove(&pdu.room_id); + .invalidate_cache(&pdu.room_id) + .await; } - _ => continue, + _ => {} } } services().rooms.state_cache.update_joined_count(room_id)?; - self.db - .set_room_state(room_id, shortstatehash, state_lock)?; + self.db.set_room_state(room_id, shortstatehash)?; Ok(()) } @@ -115,30 +172,24 @@ impl Service { /// This adds all current state events (not including the incoming event) /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. 
#[tracing::instrument(skip(self, state_ids_compressed))] - pub fn set_event_state( + pub(crate) fn set_event_state( &self, event_id: &EventId, room_id: &RoomId, state_ids_compressed: Arc>, - ) -> Result { - let shorteventid = services() - .rooms - .short - .get_or_create_shorteventid(event_id)?; + ) -> Result { + let shorteventid = + services().rooms.short.get_or_create_shorteventid(event_id)?; - let previous_shortstatehash = self.db.get_room_shortstatehash(room_id)?; + let previous_shortstatehash = + self.db.get_room_shortstatehash(room_id)?; let state_hash = calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), + state_ids_compressed.iter().map(CompressedStateEvent::as_bytes), ); - let (shortstatehash, already_existed) = services() - .rooms - .short - .get_or_create_shortstatehash(&state_hash)?; + let (shortstatehash, already_existed) = + services().rooms.short.get_or_create_shortstatehash(&state_hash)?; if !already_existed { let states_parents = previous_shortstatehash.map_or_else( @@ -154,12 +205,12 @@ impl Service { let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) + .difference(&parent_stateinfo.full_state) .copied() .collect(); let statediffremoved: HashSet<_> = parent_stateinfo - .1 + .full_state .difference(&state_ids_compressed) .copied() .collect(); @@ -172,7 +223,8 @@ impl Service { shortstatehash, statediffnew, statediffremoved, - 1_000_000, // high number because no state will be based on this one + // high number because no state will be based on this one + 1_000_000, states_parents, )?; } @@ -187,13 +239,17 @@ impl Service { /// This adds all current state events (not including the incoming event) /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. 
#[tracing::instrument(skip(self, new_pdu))] - pub fn append_to_state(&self, new_pdu: &PduEvent) -> Result { + pub(crate) fn append_to_state( + &self, + new_pdu: &PduEvent, + ) -> Result { let shorteventid = services() .rooms .short .get_or_create_shorteventid(&new_pdu.event_id)?; - let previous_shortstatehash = self.get_room_shortstatehash(&new_pdu.room_id)?; + let previous_shortstatehash = + self.get_room_shortstatehash(&new_pdu.room_id)?; if let Some(p) = previous_shortstatehash { self.db.set_event_state(shorteventid, p)?; @@ -210,10 +266,11 @@ impl Service { }, )?; - let shortstatekey = services() - .rooms - .short - .get_or_create_shortstatekey(&new_pdu.kind.to_string().into(), state_key)?; + let shortstatekey = + services().rooms.short.get_or_create_shortstatekey( + &new_pdu.kind.to_string().into(), + state_key, + )?; let new = services() .rooms @@ -223,9 +280,9 @@ impl Service { let replaces = states_parents .last() .map(|info| { - info.1 + info.full_state .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) + .find(|compressed| compressed.state == shortstatekey) }) .unwrap_or_default(); @@ -234,7 +291,8 @@ impl Service { } // TODO: statehash with deterministic inputs - let shortstatehash = services().globals.next_count()?; + let shortstatehash = + ShortStateHash::new(services().globals.next_count()?); let mut statediffnew = HashSet::new(); statediffnew.insert(new); @@ -254,136 +312,144 @@ impl Service { Ok(shortstatehash) } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) + Ok(previous_shortstatehash + .expect("first event in room must be a state event")) } } + /// Gather events to help the invited user identify the room + /// + /// Also includes the invite event itself. 
+ // Allowed because this function uses `services()` + #[allow(clippy::unused_self)] #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( + pub(crate) fn get_helpful_invite_events( &self, invite_event: &PduEvent, ) -> Result>> { - let mut state = Vec::new(); - // Add recommended events - if let Some(e) = services().rooms.state_accessor.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCreate, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = services().rooms.state_accessor.room_state_get( - &invite_event.room_id, - &StateEventType::RoomJoinRules, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = services().rooms.state_accessor.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = services().rooms.state_accessor.room_state_get( - &invite_event.room_id, - &StateEventType::RoomAvatar, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = services().rooms.state_accessor.room_state_get( - &invite_event.room_id, - &StateEventType::RoomName, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = services().rooms.state_accessor.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? 
{ - state.push(e.to_stripped_state_event()); - } + let helpful_events = [ + (StateEventType::RoomCreate, ""), + (StateEventType::RoomJoinRules, ""), + (StateEventType::RoomCanonicalAlias, ""), + (StateEventType::RoomAvatar, ""), + (StateEventType::RoomName, ""), + (StateEventType::RoomMember, invite_event.sender.as_str()), + ]; - state.push(invite_event.to_stripped_state_event()); - Ok(state) + let helpful_events = + helpful_events.into_iter().filter_map(|(state_type, state_key)| { + let state = services().rooms.state_accessor.room_state_get( + &invite_event.room_id, + &state_type, + state_key, + ); + + match state { + Ok(Some(x)) => Some(Ok(x.to_stripped_state_event())), + Err(x) => Some(Err(x)), + Ok(None) => None, + } + }); + + let actual_event = + iter::once(Ok(invite_event.to_stripped_state_event())); + + helpful_events.chain(actual_event).collect() } /// Set the state hash to a new version, but does not update state_cache. #[tracing::instrument(skip(self))] - pub fn set_room_state( + pub(crate) fn set_room_state( &self, - room_id: &RoomId, - shortstatehash: u64, - mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + room_id: &KeyToken, + shortstatehash: ShortStateHash, ) -> Result<()> { - self.db.set_room_state(room_id, shortstatehash, mutex_lock) + self.db.set_room_state(room_id, shortstatehash) } - /// Returns the room's version. + /// Returns the value of a field of an `m.room.create` event's `content`. 
+ // Allowed because this function uses `services()` + #[allow(clippy::unused_self)] #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { + pub(crate) fn get_create_content( + &self, + room_id: &RoomId, + ) -> Result { let create_event = services().rooms.state_accessor.room_state_get( room_id, &StateEventType::RoomCreate, "", )?; - let create_event_content: RoomCreateEventContent = create_event + let content_field = create_event .as_ref() .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) + serde_json::from_str::(create_event.content.get()).map_err( + |error| { + warn!(%error, "Invalid create event"); + Error::BadDatabase("Invalid create event in db.") + }, + ) }) .transpose()? - .ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "No create event found"))?; + .ok_or_else(|| { + Error::BadRequest( + ErrorKind::InvalidParam, + "No create event found", + ) + })?; - Ok(create_event_content.room_version) + Ok(content_field.extract()) } - pub fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { + #[tracing::instrument(skip(self))] + pub(crate) fn get_room_shortstatehash( + &self, + room_id: &RoomId, + ) -> Result> { self.db.get_room_shortstatehash(room_id) } - pub fn get_forward_extremities(&self, room_id: &RoomId) -> Result>> { + #[tracing::instrument(skip(self))] + pub(crate) fn get_forward_extremities( + &self, + room_id: &RoomId, + ) -> Result>> { self.db.get_forward_extremities(room_id) } - pub fn set_forward_extremities( + #[tracing::instrument( + skip(self, event_ids), + fields(event_ids = debug_slice_truncated(&event_ids, 5)), + )] + pub(crate) fn set_forward_extremities( &self, - room_id: &RoomId, + room_id: &KeyToken, event_ids: Vec, - state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()> { - self.db - 
.set_forward_extremities(room_id, event_ids, state_lock) + self.db.set_forward_extremities(room_id, event_ids) } /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( + #[tracing::instrument(skip(self, rules))] + pub(crate) fn get_auth_events( &self, room_id: &RoomId, + rules: &AuthorizationRules, kind: &TimelineEventType, sender: &UserId, state_key: Option<&str>, content: &serde_json::value::RawValue, ) -> Result>> { - let shortstatehash = if let Some(current_shortstatehash) = - services().rooms.state.get_room_shortstatehash(room_id)? - { - current_shortstatehash - } else { + let Some(shortstatehash) = self.get_room_shortstatehash(room_id)? + else { return Ok(HashMap::new()); }; - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); + let auth_events = state_res::auth_types_for_event( + kind, sender, state_key, content, rules, + ) + .expect("content is a valid JSON object"); let mut sauthevents = auth_events .into_iter() @@ -391,7 +457,10 @@ impl Service { services() .rooms .short - .get_shortstatekey(&event_type.to_string().into(), &state_key) + .get_shortstatekey( + &event_type.to_string().into(), + &state_key, + ) .ok() .flatten() .map(|s| (s, (event_type, state_key))) @@ -404,7 +473,7 @@ impl Service { .load_shortstatehash_info(shortstatehash)? 
.pop() .expect("there is always one layer") - .1; + .full_state; Ok(full_state .iter() diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs index 96116b02..452c3b1b 100644 --- a/src/service/rooms/state/data.rs +++ b/src/service/rooms/state/data.rs @@ -1,31 +1,47 @@ -use crate::Result; -use ruma::{EventId, OwnedEventId, RoomId}; use std::{collections::HashSet, sync::Arc}; -use tokio::sync::MutexGuard; -pub trait Data: Send + Sync { +use ruma::{EventId, OwnedEventId, OwnedRoomId, RoomId}; + +use crate::{ + service::{ + globals::marker, + rooms::short::{ShortEventId, ShortStateHash}, + }, + utils::on_demand_hashmap::KeyToken, + Result, +}; + +pub(crate) trait Data: Send + Sync { /// Returns the last state hash key added to the db for the given room. - fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result>; - - /// Set the state hash to a new version, but does not update state_cache. - fn set_room_state( + fn get_room_shortstatehash( &self, room_id: &RoomId, - new_shortstatehash: u64, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result>; + + /// Set the state hash to a new version, but does not update `state_cache`. + fn set_room_state( + &self, + room_id: &KeyToken, + new_shortstatehash: ShortStateHash, ) -> Result<()>; /// Associates a state with an event. - fn set_event_state(&self, shorteventid: u64, shortstatehash: u64) -> Result<()>; + fn set_event_state( + &self, + shorteventid: ShortEventId, + shortstatehash: ShortStateHash, + ) -> Result<()>; - /// Returns all events we would send as the prev_events of the next event. - fn get_forward_extremities(&self, room_id: &RoomId) -> Result>>; + /// Returns all events we would send as the `prev_events` of the next event. + fn get_forward_extremities( + &self, + room_id: &RoomId, + ) -> Result>>; /// Replace the forward extremities of the room. 
fn set_forward_extremities( &self, - room_id: &RoomId, + room_id: &KeyToken, event_ids: Vec, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()>; } diff --git a/src/service/rooms/state_accessor.rs b/src/service/rooms/state_accessor.rs new file mode 100644 index 00000000..a72a9f44 --- /dev/null +++ b/src/service/rooms/state_accessor.rs @@ -0,0 +1,622 @@ +use std::{ + collections::HashMap, + sync::{Arc, Mutex}, +}; + +use lru_cache::LruCache; +use ruma::{ + events::{ + room::{ + history_visibility::{ + HistoryVisibility, RoomHistoryVisibilityEventContent, + }, + member::{MembershipState, RoomMemberEventContent}, + name::RoomNameEventContent, + power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, + }, + StateEventType, TimelineEventType, + }, + state_res::Event, + EventId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, + UserId, +}; +use serde_json::value::to_raw_value; +use tracing::{error, warn}; + +use super::short::{ShortStateHash, ShortStateKey}; +use crate::{ + observability::{FoundIn, Lookup, METRICS}, + service::{globals::marker, pdu::PduBuilder, rooms::state::ExtractVersion}, + services, + utils::{on_demand_hashmap::KeyToken, room_version::RoomVersion}, + Error, PduEvent, Result, +}; + +mod data; + +pub(crate) use data::Data; + +pub(crate) struct Service { + db: &'static dyn Data, + server_visibility_cache: + Option>>, + user_visibility_cache: + Option>>, +} + +impl Service { + pub(crate) fn new( + db: &'static dyn Data, + server_visibility_cache_size: usize, + user_visibility_cache_size: usize, + ) -> Self { + Self { + db, + server_visibility_cache: (server_visibility_cache_size > 0).then( + || Mutex::new(LruCache::new(server_visibility_cache_size)), + ), + user_visibility_cache: (user_visibility_cache_size > 0) + .then(|| Mutex::new(LruCache::new(user_visibility_cache_size))), + } + } + + /// Builds a StateMap by iterating over all keys that start + /// with state_hash, 
this gives the full state for the given state_hash. + #[tracing::instrument(skip(self))] + pub(crate) async fn state_full_ids( + &self, + shortstatehash: ShortStateHash, + ) -> Result>> { + let full_state = services() + .rooms + .state_compressor + .load_shortstatehash_info(shortstatehash)? + .pop() + .expect("there is always one layer") + .full_state; + let mut result = HashMap::new(); + let mut i = 0; + for compressed in full_state.iter() { + let parsed = services() + .rooms + .state_compressor + .parse_compressed_state_event(compressed)?; + result.insert(parsed.0, parsed.1); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } + Ok(result) + } + + #[tracing::instrument(skip(self))] + pub(crate) async fn state_full( + &self, + shortstatehash: ShortStateHash, + ) -> Result>> { + let full_state = services() + .rooms + .state_compressor + .load_shortstatehash_info(shortstatehash)? + .pop() + .expect("there is always one layer") + .full_state; + + let mut result = HashMap::new(); + let mut i = 0; + for compressed in full_state.iter() { + let (_, eventid) = services() + .rooms + .state_compressor + .parse_compressed_state_event(compressed)?; + if let Some(pdu) = services().rooms.timeline.get_pdu(&eventid)? { + result.insert( + ( + pdu.kind.to_string().into(), + pdu.state_key + .as_ref() + .ok_or_else(|| { + Error::bad_database( + "State event has no state key.", + ) + })? + .clone(), + ), + pdu, + ); + } + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } + + Ok(result) + } + + /// Returns a single PDU from `room_id` with key (`event_type`, + /// `state_key`). + // Allowed because this function uses `services()` + #[allow(clippy::unused_self)] + #[tracing::instrument(skip(self))] + pub(crate) fn state_get_id( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, + ) -> Result>> { + let Some(shortstatekey) = + services().rooms.short.get_shortstatekey(event_type, state_key)? 
+ else { + return Ok(None); + }; + let full_state = services() + .rooms + .state_compressor + .load_shortstatehash_info(shortstatehash)? + .pop() + .expect("there is always one layer") + .full_state; + Ok(full_state + .iter() + .find(|compressed| compressed.state == shortstatekey) + .and_then(|compressed| { + services() + .rooms + .state_compressor + .parse_compressed_state_event(compressed) + .ok() + .map(|(_, id)| id) + })) + } + + /// Returns a single PDU from `room_id` with key (`event_type`, + /// `state_key`). + #[tracing::instrument(skip(self))] + pub(crate) fn state_get( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, + ) -> Result>> { + self.state_get_id(shortstatehash, event_type, state_key)? + .map_or(Ok(None), |event_id| { + services().rooms.timeline.get_pdu(&event_id) + }) + } + + /// Get membership for given user in state + #[tracing::instrument(skip(self))] + fn user_membership( + &self, + shortstatehash: ShortStateHash, + user_id: &UserId, + ) -> Result { + self.state_get( + shortstatehash, + &StateEventType::RoomMember, + user_id.as_str(), + )? 
+ .map_or(Ok(MembershipState::Leave), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomMemberEventContent| c.membership) + .map_err(|_| { + Error::bad_database( + "Invalid room membership event in database.", + ) + }) + }) + } + + /// The user was a joined member at this state (potentially in the past) + #[tracing::instrument(skip(self), ret(level = "trace"))] + fn user_was_joined( + &self, + shortstatehash: ShortStateHash, + user_id: &UserId, + ) -> bool { + self.user_membership(shortstatehash, user_id) + .is_ok_and(|s| s == MembershipState::Join) + } + + /// The user was an invited or joined room member at this state (potentially + /// in the past) + #[tracing::instrument(skip(self), ret(level = "trace"))] + fn user_was_invited( + &self, + shortstatehash: ShortStateHash, + user_id: &UserId, + ) -> bool { + self.user_membership(shortstatehash, user_id).is_ok_and(|s| { + s == MembershipState::Join || s == MembershipState::Invite + }) + } + + /// Whether a server is allowed to see an event through federation, based on + /// the room's history_visibility at that event's state. + #[tracing::instrument(skip(self))] + pub(crate) fn server_can_see_event( + &self, + origin: &ServerName, + room_id: &RoomId, + event_id: &EventId, + ) -> Result { + let lookup = Lookup::VisibilityForServer; + + let Some(shortstatehash) = self.pdu_shortstatehash(event_id)? else { + return Ok(true); + }; + + if let Some(cache) = &self.server_visibility_cache { + if let Some(visibility) = cache + .lock() + .unwrap() + .get_mut(&(origin.to_owned(), shortstatehash)) + { + METRICS.record_lookup(lookup, FoundIn::Cache); + return Ok(*visibility); + } + } + + let history_visibility = self + .state_get( + shortstatehash, + &StateEventType::RoomHistoryVisibility, + "", + )? 
+ .map_or(Ok(HistoryVisibility::Shared), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomHistoryVisibilityEventContent| { + c.history_visibility + }) + .map_err(|_| { + Error::bad_database( + "Invalid history visibility event in database.", + ) + }) + })?; + + let mut current_server_members = services() + .rooms + .state_cache + .room_members(room_id) + .filter_map(Result::ok) + .filter(|member| member.server_name() == origin); + + let visibility = match history_visibility { + HistoryVisibility::WorldReadable | HistoryVisibility::Shared => { + true + } + HistoryVisibility::Invited => { + // Allow if any member on requesting server was AT LEAST + // invited, else deny + current_server_members.any(|member| { + self.user_was_invited(shortstatehash, &member) + }) + } + HistoryVisibility::Joined => { + // Allow if any member on requested server was joined, else deny + current_server_members + .any(|member| self.user_was_joined(shortstatehash, &member)) + } + other => { + error!(kind = %other, "Unknown history visibility"); + false + } + }; + + METRICS.record_lookup(lookup, FoundIn::Database); + + if let Some(cache) = &self.server_visibility_cache { + cache + .lock() + .unwrap() + .insert((origin.to_owned(), shortstatehash), visibility); + } + + Ok(visibility) + } + + /// Whether a user is allowed to see an event, based on + /// the room's history_visibility at that event's state. + #[tracing::instrument(skip(self))] + pub(crate) fn user_can_see_event( + &self, + user_id: &UserId, + room_id: &RoomId, + event_id: &EventId, + ) -> Result { + let lookup = Lookup::VisibilityForUser; + + let Some(shortstatehash) = self.pdu_shortstatehash(event_id)? 
else { + return Ok(true); + }; + + if let Some(cache) = &self.user_visibility_cache { + if let Some(visibility) = cache + .lock() + .unwrap() + .get_mut(&(user_id.to_owned(), shortstatehash)) + { + METRICS.record_lookup(lookup, FoundIn::Cache); + return Ok(*visibility); + } + } + + let currently_member = + services().rooms.state_cache.is_joined(user_id, room_id)?; + + let history_visibility = self + .state_get( + shortstatehash, + &StateEventType::RoomHistoryVisibility, + "", + )? + .map_or(Ok(HistoryVisibility::Shared), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomHistoryVisibilityEventContent| { + c.history_visibility + }) + .map_err(|_| { + Error::bad_database( + "Invalid history visibility event in database.", + ) + }) + })?; + + let visibility = match history_visibility { + HistoryVisibility::WorldReadable => true, + HistoryVisibility::Shared => currently_member, + HistoryVisibility::Invited => { + // Allow if any member on requesting server was AT LEAST + // invited, else deny + self.user_was_invited(shortstatehash, user_id) + } + HistoryVisibility::Joined => { + // Allow if any member on requested server was joined, else deny + self.user_was_joined(shortstatehash, user_id) + } + other => { + error!(kind = %other, "Unknown history visibility"); + false + } + }; + + METRICS.record_lookup(lookup, FoundIn::Database); + + if let Some(cache) = &self.user_visibility_cache { + cache + .lock() + .unwrap() + .insert((user_id.to_owned(), shortstatehash), visibility); + } + + Ok(visibility) + } + + /// Whether a user is allowed to see an event, based on + /// the room's history_visibility at that event's state. 
+ #[tracing::instrument(skip(self, user_id, room_id))] + pub(crate) fn user_can_see_state_events( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result { + let currently_member = + services().rooms.state_cache.is_joined(user_id, room_id)?; + + let history_visibility = self + .room_state_get( + room_id, + &StateEventType::RoomHistoryVisibility, + "", + )? + .map_or(Ok(HistoryVisibility::Shared), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomHistoryVisibilityEventContent| { + c.history_visibility + }) + .map_err(|_| { + Error::bad_database( + "Invalid history visibility event in database.", + ) + }) + })?; + + Ok(currently_member + || history_visibility == HistoryVisibility::WorldReadable) + } + + /// Returns the state hash for this pdu. + #[tracing::instrument(skip(self))] + pub(crate) fn pdu_shortstatehash( + &self, + event_id: &EventId, + ) -> Result> { + self.db.pdu_shortstatehash(event_id) + } + + /// Returns the full room state. + #[tracing::instrument(skip(self))] + pub(crate) async fn room_state_full( + &self, + room_id: &RoomId, + ) -> Result>> { + if let Some(current_shortstatehash) = + services().rooms.state.get_room_shortstatehash(room_id)? + { + self.state_full(current_shortstatehash).await + } else { + Ok(HashMap::new()) + } + } + + /// Returns a single PDU from `room_id` with key (`event_type`, + /// `state_key`). + #[tracing::instrument(skip(self))] + pub(crate) fn room_state_get_id( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, + ) -> Result>> { + if let Some(current_shortstatehash) = + services().rooms.state.get_room_shortstatehash(room_id)? + { + self.state_get_id(current_shortstatehash, event_type, state_key) + } else { + Ok(None) + } + } + + /// Returns a single PDU from `room_id` with key (`event_type`, + /// `state_key`). 
+ #[tracing::instrument(skip(self))] + pub(crate) fn room_state_get( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, + ) -> Result>> { + if let Some(current_shortstatehash) = + services().rooms.state.get_room_shortstatehash(room_id)? + { + self.state_get(current_shortstatehash, event_type, state_key) + } else { + Ok(None) + } + } + + #[tracing::instrument(skip(self))] + pub(crate) fn get_name(&self, room_id: &RoomId) -> Result> { + self.room_state_get(room_id, &StateEventType::RoomName, "")?.map_or( + Ok(None), + |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomNameEventContent| Some(c.name)) + .map_err(|error| { + error!(%error, "Invalid room name event in database"); + Error::BadDatabase( + "Invalid room name event in database.", + ) + }) + }, + ) + } + + // Allowed because this function uses `services()` + #[allow(clippy::unused_self)] + #[tracing::instrument(skip(self), ret(level = "trace"))] + pub(crate) fn user_can_invite( + &self, + room_id: &KeyToken, + sender: &UserId, + target_user: &UserId, + ) -> bool { + let content = + to_raw_value(&RoomMemberEventContent::new(MembershipState::Invite)) + .expect("Event content always serializes"); + + let new_event = PduBuilder { + event_type: TimelineEventType::RoomMember, + content, + unsigned: None, + state_key: Some(target_user.into()), + redacts: None, + }; + + services() + .rooms + .timeline + .create_hash_and_sign_event(new_event, sender, room_id) + .is_ok() + } + + /// Checks if a given user can redact a given event + #[tracing::instrument(skip(self))] + pub(crate) fn user_can_redact( + &self, + redacts: &EventId, + sender: &UserId, + room_id: &RoomId, + ) -> Result { + self.room_state_get(room_id, &StateEventType::RoomPowerLevels, "")? + .map_or_else( + // Falling back on m.room.create to judge power levels + || { + if let Some(pdu) = self.room_state_get( + room_id, + &StateEventType::RoomCreate, + "", + )? 
{ + Ok(pdu.sender == sender + || if let Ok(Some(pdu)) = + services().rooms.timeline.get_pdu(redacts) + { + pdu.sender == sender + } else { + false + }) + } else { + Err(Error::bad_database( + "No m.room.power_levels or m.room.create events \ + in database for room", + )) + } + }, + |e| { + serde_json::from_str(e.content.get()) + .map(|c: RoomPowerLevelsEventContent| c.into()) + .map(|e: RoomPowerLevels| { + e.user_can_redact_event_of_other(sender) + || e.user_can_redact_own_event(sender) + && if let Ok(Some(pdu)) = services() + .rooms + .timeline + .get_pdu(redacts) + { + pdu.sender().server_name() + == sender.server_name() + } else { + false + } + }) + .map_err(|_| { + Error::bad_database( + "Invalid m.room.power_levels event in database", + ) + }) + }, + ) + } + + /// Checks whether a redaction event is authorized against the current state + /// of it's room. + /// + /// Panics if `event` is not a `m.room.redaction` event. + pub(crate) fn redaction_event_allowed( + &self, + event: &PduEvent, + ) -> Result { + assert_eq!( + event.kind, + TimelineEventType::RoomRedaction, + "event must be a redaction event" + ); + + let room_version_id = services() + .rooms + .state + .get_create_content::(&event.room_id)?; + let room_version = RoomVersion::try_from(&room_version_id)?; + + let Some(redacted_id) = event.redacts(&room_version) else { + return Ok(false); + }; + + self.user_can_redact(&redacted_id, &event.sender, &event.room_id) + } +} diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index f3ae3c21..4399dc66 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -1,59 +1,11 @@ -use std::{collections::HashMap, sync::Arc}; +use ruma::EventId; -use async_trait::async_trait; -use ruma::{events::StateEventType, EventId, RoomId}; - -use crate::{PduEvent, Result}; - -#[async_trait] -pub trait Data: Send + Sync { - /// Builds a StateMap by iterating over all keys that start - /// 
with state_hash, this gives the full state for the given state_hash. - async fn state_full_ids(&self, shortstatehash: u64) -> Result>>; - - async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>>; - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>>; - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>>; +use crate::{service::rooms::short::ShortStateHash, Result}; +pub(crate) trait Data: Send + Sync { /// Returns the state hash for this pdu. - fn pdu_shortstatehash(&self, event_id: &EventId) -> Result>; - - /// Returns the full room state. - async fn room_state_full( + fn pdu_shortstatehash( &self, - room_id: &RoomId, - ) -> Result>>; - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>>; - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). 
- fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>>; + event_id: &EventId, + ) -> Result>; } diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs deleted file mode 100644 index 53e3176f..00000000 --- a/src/service/rooms/state_accessor/mod.rs +++ /dev/null @@ -1,398 +0,0 @@ -mod data; -use std::{ - collections::HashMap, - sync::{Arc, Mutex}, -}; - -pub use data::Data; -use lru_cache::LruCache; -use ruma::{ - events::{ - room::{ - avatar::RoomAvatarEventContent, - history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, - member::{MembershipState, RoomMemberEventContent}, - name::RoomNameEventContent, - power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, - }, - StateEventType, - }, - state_res::Event, - EventId, JsOption, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, -}; -use serde_json::value::to_raw_value; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use crate::{service::pdu::PduBuilder, services, Error, PduEvent, Result}; - -pub struct Service { - pub db: &'static dyn Data, - pub server_visibility_cache: Mutex>, - pub user_visibility_cache: Mutex>, -} - -impl Service { - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - self.db.state_full_ids(shortstatehash).await - } - - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - self.db.state_full(shortstatehash).await - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). 
- #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.db.state_get_id(shortstatehash, event_type, state_key) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.db.state_get(shortstatehash, event_type, state_key) - } - - /// Get membership for given user in state - fn user_membership(&self, shortstatehash: u64, user_id: &UserId) -> Result { - self.state_get( - shortstatehash, - &StateEventType::RoomMember, - user_id.as_str(), - )? - .map_or(Ok(MembershipState::Leave), |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomMemberEventContent| c.membership) - .map_err(|_| Error::bad_database("Invalid room membership event in database.")) - }) - } - - /// The user was a joined member at this state (potentially in the past) - fn user_was_joined(&self, shortstatehash: u64, user_id: &UserId) -> bool { - self.user_membership(shortstatehash, user_id) - .map(|s| s == MembershipState::Join) - .unwrap_or_default() // Return sensible default, i.e. false - } - - /// The user was an invited or joined room member at this state (potentially - /// in the past) - fn user_was_invited(&self, shortstatehash: u64, user_id: &UserId) -> bool { - self.user_membership(shortstatehash, user_id) - .map(|s| s == MembershipState::Join || s == MembershipState::Invite) - .unwrap_or_default() // Return sensible default, i.e. false - } - - /// Whether a server is allowed to see an event through federation, based on - /// the room's history_visibility at that event's state. - #[tracing::instrument(skip(self, origin, room_id, event_id))] - pub fn server_can_see_event( - &self, - origin: &ServerName, - room_id: &RoomId, - event_id: &EventId, - ) -> Result { - let shortstatehash = match self.pdu_shortstatehash(event_id)? 
{ - Some(shortstatehash) => shortstatehash, - None => return Ok(true), - }; - - if let Some(visibility) = self - .server_visibility_cache - .lock() - .unwrap() - .get_mut(&(origin.to_owned(), shortstatehash)) - { - return Ok(*visibility); - } - - let history_visibility = self - .state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "")? - .map_or(Ok(HistoryVisibility::Shared), |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility) - .map_err(|_| { - Error::bad_database("Invalid history visibility event in database.") - }) - })?; - - let mut current_server_members = services() - .rooms - .state_cache - .room_members(room_id) - .filter_map(|r| r.ok()) - .filter(|member| member.server_name() == origin); - - let visibility = match history_visibility { - HistoryVisibility::WorldReadable | HistoryVisibility::Shared => true, - HistoryVisibility::Invited => { - // Allow if any member on requesting server was AT LEAST invited, else deny - current_server_members.any(|member| self.user_was_invited(shortstatehash, &member)) - } - HistoryVisibility::Joined => { - // Allow if any member on requested server was joined, else deny - current_server_members.any(|member| self.user_was_joined(shortstatehash, &member)) - } - _ => { - error!("Unknown history visibility {history_visibility}"); - false - } - }; - - self.server_visibility_cache - .lock() - .unwrap() - .insert((origin.to_owned(), shortstatehash), visibility); - - Ok(visibility) - } - - /// Whether a user is allowed to see an event, based on - /// the room's history_visibility at that event's state. - #[tracing::instrument(skip(self, user_id, room_id, event_id))] - pub fn user_can_see_event( - &self, - user_id: &UserId, - room_id: &RoomId, - event_id: &EventId, - ) -> Result { - let shortstatehash = match self.pdu_shortstatehash(event_id)? 
{ - Some(shortstatehash) => shortstatehash, - None => return Ok(true), - }; - - if let Some(visibility) = self - .user_visibility_cache - .lock() - .unwrap() - .get_mut(&(user_id.to_owned(), shortstatehash)) - { - return Ok(*visibility); - } - - let currently_member = services().rooms.state_cache.is_joined(user_id, room_id)?; - - let history_visibility = self - .state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "")? - .map_or(Ok(HistoryVisibility::Shared), |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility) - .map_err(|_| { - Error::bad_database("Invalid history visibility event in database.") - }) - })?; - - let visibility = match history_visibility { - HistoryVisibility::WorldReadable => true, - HistoryVisibility::Shared => currently_member, - HistoryVisibility::Invited => { - // Allow if any member on requesting server was AT LEAST invited, else deny - self.user_was_invited(shortstatehash, user_id) - } - HistoryVisibility::Joined => { - // Allow if any member on requested server was joined, else deny - self.user_was_joined(shortstatehash, user_id) - } - _ => { - error!("Unknown history visibility {history_visibility}"); - false - } - }; - - self.user_visibility_cache - .lock() - .unwrap() - .insert((user_id.to_owned(), shortstatehash), visibility); - - Ok(visibility) - } - - /// Whether a user is allowed to see an event, based on - /// the room's history_visibility at that event's state. - #[tracing::instrument(skip(self, user_id, room_id))] - pub fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let currently_member = services().rooms.state_cache.is_joined(user_id, room_id)?; - - let history_visibility = self - .room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")? 
- .map_or(Ok(HistoryVisibility::Shared), |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility) - .map_err(|_| { - Error::bad_database("Invalid history visibility event in database.") - }) - })?; - - Ok(currently_member || history_visibility == HistoryVisibility::WorldReadable) - } - - /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.db.pdu_shortstatehash(event_id) - } - - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - self.db.room_state_full(room_id).await - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.db.room_state_get_id(room_id, event_type, state_key) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.db.room_state_get(room_id, event_type, state_key) - } - - pub fn get_name(&self, room_id: &RoomId) -> Result> { - services() - .rooms - .state_accessor - .room_state_get(room_id, &StateEventType::RoomName, "")? - .map_or(Ok(None), |s| { - serde_json::from_str(s.content.get()) - .map(|c: RoomNameEventContent| Some(c.name)) - .map_err(|e| { - error!( - "Invalid room name event in database for room {}. {}", - room_id, e - ); - Error::bad_database("Invalid room name event in database.") - }) - }) - } - - pub fn get_avatar(&self, room_id: &RoomId) -> Result> { - services() - .rooms - .state_accessor - .room_state_get(room_id, &StateEventType::RoomAvatar, "")? 
- .map_or(Ok(JsOption::Undefined), |s| { - serde_json::from_str(s.content.get()) - .map_err(|_| Error::bad_database("Invalid room avatar event in database.")) - }) - } - - pub async fn user_can_invite( - &self, - room_id: &RoomId, - sender: &UserId, - target_user: &UserId, - state_lock: &MutexGuard<'_, ()>, - ) -> Result { - let content = to_raw_value(&RoomMemberEventContent::new(MembershipState::Invite)) - .expect("Event content always serializes"); - - let new_event = PduBuilder { - event_type: ruma::events::TimelineEventType::RoomMember, - content, - unsigned: None, - state_key: Some(target_user.into()), - redacts: None, - }; - - Ok(services() - .rooms - .timeline - .create_hash_and_sign_event(new_event, sender, room_id, state_lock) - .is_ok()) - } - - pub fn get_member( - &self, - room_id: &RoomId, - user_id: &UserId, - ) -> Result> { - services() - .rooms - .state_accessor - .room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .map_or(Ok(None), |s| { - serde_json::from_str(s.content.get()) - .map_err(|_| Error::bad_database("Invalid room member event in database.")) - }) - } - - /// Checks if a given user can redact a given event - /// - /// If `federation` is `true`, it allows redaction events from any user of the same server - /// as the original event sender, [as required by room versions >= - /// v3](https://spec.matrix.org/v1.10/rooms/v11/#handling-redactions) - pub fn user_can_redact( - &self, - redacts: &EventId, - sender: &UserId, - room_id: &RoomId, - federation: bool, - ) -> Result { - self.room_state_get(room_id, &StateEventType::RoomPowerLevels, "")? 
- .map(|e| { - serde_json::from_str(e.content.get()) - .map(|c: RoomPowerLevelsEventContent| c.into()) - .map(|e: RoomPowerLevels| { - e.user_can_redact_event_of_other(sender) - || e.user_can_redact_own_event(sender) - && if let Ok(Some(pdu)) = services().rooms.timeline.get_pdu(redacts) - { - if federation { - pdu.sender().server_name() == sender.server_name() - } else { - pdu.sender == sender - } - } else { - false - } - }) - .map_err(|_| { - Error::bad_database("Invalid m.room.power_levels event in database") - }) - }) - // Falling back on m.room.create to judge power levels - .unwrap_or_else(|| { - if let Some(pdu) = self.room_state_get(room_id, &StateEventType::RoomCreate, "")? { - Ok(pdu.sender == sender - || if let Ok(Some(pdu)) = services().rooms.timeline.get_pdu(redacts) { - pdu.sender == sender - } else { - false - }) - } else { - Err(Error::bad_database( - "No m.room.power_levels or m.room.create events in database for room", - )) - } - }) - } -} diff --git a/src/service/rooms/state_cache.rs b/src/service/rooms/state_cache.rs new file mode 100644 index 00000000..1bf0fdbc --- /dev/null +++ b/src/service/rooms/state_cache.rs @@ -0,0 +1,521 @@ +use std::{ + collections::{HashMap, HashSet}, + sync::{Arc, RwLock}, +}; + +use ruma::{ + events::{ + direct::DirectEventContent, + ignored_user_list::IgnoredUserListEventContent, + room::member::MembershipState, tag::TagEventContent, + AnyStrippedStateEvent, AnySyncStateEvent, + }, + serde::Raw, + OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, +}; +use tracing::warn; + +use crate::{ + observability::{FoundIn, Lookup, METRICS}, + service::appservice::RegistrationInfo, + services, Error, Result, +}; + +mod data; + +pub(crate) use data::Data; + +use super::state::ExtractPredecessor; + +pub(crate) struct Service { + db: &'static dyn Data, + appservice_in_room_cache: + RwLock>>, + our_real_users_cache: + RwLock>>>, +} + +type RoomsLeft = (OwnedRoomId, Vec>); + +impl Service { + pub(crate) fn 
new(db: &'static dyn Data) -> Self { + Self { + db, + our_real_users_cache: RwLock::new(HashMap::new()), + appservice_in_room_cache: RwLock::new(HashMap::new()), + } + } + + /// Update current membership data. + // Allowed because this function uses `services()` + #[allow(clippy::unused_self)] + #[tracing::instrument(skip(self, last_state))] + pub(crate) fn update_membership( + &self, + room_id: &RoomId, + user_id: &UserId, + membership: &MembershipState, + sender: &UserId, + last_state: Option>>, + update_joined_count: bool, + ) -> Result<()> { + // Keep track what remote users exist by adding them as "deactivated" + // users + if user_id.server_name() != services().globals.server_name() { + services().users.create(user_id, None)?; + // TODO: displayname, avatar url + } + + match &membership { + MembershipState::Join => { + // Check if the user never joined this room + if !self.once_joined(user_id, room_id)? { + // Add the user ID to the join list then + self.db.mark_as_once_joined(user_id, room_id)?; + + // Check if the room has a predecessor + if let Some(predecessor) = services() + .rooms + .state + .get_create_content::(room_id) + .inspect_err(|error| { + warn!(%error, "Failed to get room predecessor"); + }) + .ok() + .flatten() + { + self.copy_upgraded_account_data( + user_id, + &predecessor.room_id, + room_id, + )?; + } + } + + self.db.mark_as_joined(user_id, room_id)?; + } + MembershipState::Invite => { + // We want to know if the sender is ignored by the receiver + let is_ignored = services() + .account_data + .get_global::(user_id)? + .map(|event| { + event.deserialize() + .map_err(|error| { + warn!( + %error, + "Invalid m.ignored_user_list account data event", + ); + Error::BadDatabase("Invalid account data event.") + }) + }) + .transpose()? 
+ .is_some_and(|ignored| { + ignored + .ignored_users + .iter() + .any(|(user, _details)| user == sender) + }); + + if is_ignored { + return Ok(()); + } + + self.db.mark_as_invited(user_id, room_id, last_state)?; + } + MembershipState::Leave | MembershipState::Ban => { + self.db.mark_as_left(user_id, room_id)?; + } + _ => {} + } + + if update_joined_count { + self.update_joined_count(room_id)?; + } + + Ok(()) + } + + /// Copy all account data references from the predecessor to a new room when + /// joining an upgraded room. + /// + /// References to the predecessor room are not removed. + #[tracing::instrument(skip(self))] + fn copy_upgraded_account_data( + &self, + user_id: &UserId, + from_room_id: &RoomId, + to_room_id: &RoomId, + ) -> Result<()> { + // - Push rules + // + // TODO: finish this once push rules are implemented. + // + // let mut push_rules_event_content: PushRulesEvent = + // account_data .get( + // None, + // user_id, + // EventType::PushRules, + // )?; + // + // NOTE: find where `predecessor.room_id` match + // and update to `room_id`. + // + // account_data + // .update( + // None, + // user_id, + // EventType::PushRules, + // &push_rules_event_content, + // globals, + // ) + // .ok(); + + self.copy_upgraded_account_data_tag(user_id, from_room_id, to_room_id)?; + self.copy_upgraded_account_data_direct( + user_id, + from_room_id, + to_room_id, + )?; + Ok(()) + } + + /// Copy `m.tag` account data to an upgraded room. + // Allowed because this function uses `services()` + #[allow(clippy::unused_self)] + fn copy_upgraded_account_data_tag( + &self, + user_id: &UserId, + from_room_id: &RoomId, + to_room_id: &RoomId, + ) -> Result<()> { + let Some(event) = services() + .account_data + .get_room::(from_room_id, user_id)? 
+ else { + return Ok(()); + }; + if let Err(error) = + services().account_data.update_room(to_room_id, user_id, &event) + { + warn!(%error, "error writing m.tag account data to upgraded room"); + } + + Ok(()) + } + + /// Copy references in `m.direct` account data events to an upgraded room. + // Allowed because this function uses `services()` + #[allow(clippy::unused_self)] + fn copy_upgraded_account_data_direct( + &self, + user_id: &UserId, + from_room_id: &RoomId, + to_room_id: &RoomId, + ) -> Result<()> { + let Some(event_content) = services() + .account_data + .get_global::(user_id)? + else { + return Ok(()); + }; + + let mut event_content = event_content + .deserialize_as::() + .expect("RawValue -> Value should always succeed"); + + // As a server, we should try not to assume anything about the schema + // of this event. Account data may be arbitrary JSON. + // + // In particular, there is an element bug[1] that causes it to store + // m.direct events that don't match the schema from the spec. + // + // [1]: https://github.com/element-hq/element-web/issues/27630 + // + // Valid m.direct event content looks like this: + // + // { + // "@userid1": [ "!roomid1", "!roomid2" ], + // "@userid2": [ "!roomid3" ], + // } + // + // We want to find userid keys where the value contains from_room_id, + // and insert a new entry for to_room_id. This should work even if some + // of the userid keys do not conform to the spec. If parts of the object + // do not match the expected schema, we should prefer to skip just those + // parts. 
+ + let mut event_updated = false; + let Some(direct_user_ids) = event_content.as_object_mut() else { + return Ok(()); + }; + for room_ids in direct_user_ids.values_mut() { + let Some(room_ids) = room_ids.as_array_mut() else { + continue; + }; + if room_ids.iter().any(|room_id| room_id == from_room_id.as_str()) { + room_ids.push(to_room_id.to_string().into()); + event_updated = true; + } + } + + if event_updated { + if let Err(error) = services().account_data.update_global( + user_id, + &Raw::new(&event_content) + .expect("json serialization should always succeed") + .cast::(), + ) { + warn!(%error, "error writing m.direct account data event after upgrading room"); + } + } + Ok(()) + } + + #[tracing::instrument(skip(self, room_id))] + pub(crate) fn update_joined_count( + &self, + room_id: &RoomId, + ) -> Result>> { + let our_real_users = Arc::new(self.db.update_joined_count(room_id)?); + + self.our_real_users_cache + .write() + .unwrap() + .insert(room_id.to_owned(), our_real_users.clone()); + self.appservice_in_room_cache.write().unwrap().remove(room_id); + + Ok(our_real_users) + } + + #[tracing::instrument(skip(self, room_id))] + pub(crate) fn get_our_real_users( + &self, + room_id: &RoomId, + ) -> Result>> { + let lookup = Lookup::OurRealUsers; + + if let Some(our_real_users) = + self.our_real_users_cache.read().unwrap().get(room_id).cloned() + { + METRICS.record_lookup(lookup, FoundIn::Cache); + return Ok(our_real_users); + } + + let our_real_users = self.update_joined_count(room_id)?; + + METRICS.record_lookup(lookup, FoundIn::Database); + + Ok(our_real_users) + } + + #[tracing::instrument(skip(self, room_id, appservice))] + pub(crate) fn appservice_in_room( + &self, + room_id: &RoomId, + appservice: &RegistrationInfo, + ) -> Result { + let lookup = Lookup::AppserviceInRoom; + + if let Some(in_room) = self + .appservice_in_room_cache + .read() + .unwrap() + .get(room_id) + .and_then(|map| map.get(&appservice.registration.id)) + .copied() + { + 
METRICS.record_lookup(lookup, FoundIn::Cache); + return Ok(in_room); + } + + let in_room = self.db.appservice_in_room(room_id, appservice)?; + + METRICS.record_lookup(lookup, FoundIn::Database); + self.appservice_in_room_cache + .write() + .unwrap() + .entry(room_id.to_owned()) + .or_default() + .insert(appservice.registration.id.clone(), in_room); + + Ok(in_room) + } + + /// Makes a user forget a room. + #[tracing::instrument(skip(self))] + pub(crate) fn forget( + &self, + room_id: &RoomId, + user_id: &UserId, + ) -> Result<()> { + self.db.forget(room_id, user_id) + } + + /// Returns an iterator of all servers participating in this room. + #[tracing::instrument(skip(self))] + pub(crate) fn room_servers<'a>( + &'a self, + room_id: &RoomId, + ) -> impl Iterator> + 'a { + self.db.room_servers(room_id) + } + + #[tracing::instrument(skip(self))] + pub(crate) fn server_in_room( + &self, + server: &ServerName, + room_id: &RoomId, + ) -> Result { + self.db.server_in_room(server, room_id) + } + + /// Returns an iterator of all rooms a server participates in (as far as we + /// know). + #[tracing::instrument(skip(self))] + pub(crate) fn server_rooms<'a>( + &'a self, + server: &ServerName, + ) -> impl Iterator> + 'a { + self.db.server_rooms(server) + } + + /// Returns an iterator over all joined members of a room. + #[tracing::instrument(skip(self))] + pub(crate) fn room_members<'a>( + &'a self, + room_id: &RoomId, + ) -> impl Iterator> + 'a { + self.db.room_members(room_id) + } + + #[tracing::instrument(skip(self))] + pub(crate) fn room_joined_count( + &self, + room_id: &RoomId, + ) -> Result> { + self.db.room_joined_count(room_id) + } + + #[tracing::instrument(skip(self))] + pub(crate) fn room_invited_count( + &self, + room_id: &RoomId, + ) -> Result> { + self.db.room_invited_count(room_id) + } + + /// Returns an iterator over all invited members of a room. 
+ // Unused, but needed for consistency with other methods + #[allow(dead_code)] + #[tracing::instrument(skip(self))] + pub(crate) fn room_members_invited<'a>( + &'a self, + room_id: &RoomId, + ) -> impl Iterator> + 'a { + self.db.room_members_invited(room_id) + } + + #[tracing::instrument(skip(self))] + pub(crate) fn get_invite_count( + &self, + room_id: &RoomId, + user_id: &UserId, + ) -> Result> { + self.db.get_invite_count(room_id, user_id) + } + + #[tracing::instrument(skip(self))] + pub(crate) fn get_left_count( + &self, + room_id: &RoomId, + user_id: &UserId, + ) -> Result> { + self.db.get_left_count(room_id, user_id) + } + + /// Returns an iterator over all rooms this user joined. + #[tracing::instrument(skip(self))] + pub(crate) fn rooms_joined<'a>( + &'a self, + user_id: &UserId, + ) -> impl Iterator> + 'a { + self.db.rooms_joined(user_id) + } + + /// Returns an iterator over all rooms a user was invited to. + #[tracing::instrument(skip(self))] + pub(crate) fn rooms_invited<'a>( + &'a self, + user_id: &UserId, + ) -> impl Iterator< + Item = Result<(OwnedRoomId, Vec>)>, + > + 'a { + self.db.rooms_invited(user_id) + } + + #[tracing::instrument(skip(self))] + pub(crate) fn invite_state( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result>>> { + self.db.invite_state(user_id, room_id) + } + + #[tracing::instrument(skip(self))] + pub(crate) fn left_state( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result>>> { + self.db.left_state(user_id, room_id) + } + + /// Returns an iterator over all rooms a user left. 
+ #[tracing::instrument(skip(self))] + pub(crate) fn rooms_left<'a>( + &'a self, + user_id: &UserId, + ) -> impl Iterator> + 'a { + self.db.rooms_left(user_id) + } + + #[tracing::instrument(skip(self))] + pub(crate) fn once_joined( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result { + self.db.once_joined(user_id, room_id) + } + + #[tracing::instrument(skip(self))] + pub(crate) fn is_joined( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result { + self.db.is_joined(user_id, room_id) + } + + #[tracing::instrument(skip(self))] + pub(crate) fn is_invited( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result { + self.db.is_invited(user_id, room_id) + } + + // Unused, but needed for consistency with other methods + #[allow(dead_code)] + #[tracing::instrument(skip(self))] + pub(crate) fn is_left( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result { + self.db.is_left(user_id, room_id) + } +} diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs index b511919a..a265f99c 100644 --- a/src/service/rooms/state_cache/data.rs +++ b/src/service/rooms/state_cache/data.rs @@ -1,14 +1,19 @@ -use std::{collections::HashSet, sync::Arc}; +use std::collections::HashSet; -use crate::{service::appservice::RegistrationInfo, Result}; use ruma::{ events::{AnyStrippedStateEvent, AnySyncStateEvent}, serde::Raw, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, }; -pub trait Data: Send + Sync { - fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; +use crate::{service::appservice::RegistrationInfo, Result}; + +pub(crate) trait Data: Send + Sync { + fn mark_as_once_joined( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result<()>; fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; fn mark_as_invited( &self, @@ -18,11 +23,16 @@ pub trait Data: Send + Sync { ) -> Result<()>; fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) -> 
Result<()>; - fn update_joined_count(&self, room_id: &RoomId) -> Result<()>; + fn update_joined_count( + &self, + room_id: &RoomId, + ) -> Result>; - fn get_our_real_users(&self, room_id: &RoomId) -> Result>>; - - fn appservice_in_room(&self, room_id: &RoomId, appservice: &RegistrationInfo) -> Result; + fn appservice_in_room( + &self, + room_id: &RoomId, + appservice: &RegistrationInfo, + ) -> Result; /// Makes a user forget a room. fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()>; @@ -33,9 +43,14 @@ pub trait Data: Send + Sync { room_id: &RoomId, ) -> Box> + 'a>; - fn server_in_room(&self, server: &ServerName, room_id: &RoomId) -> Result; + fn server_in_room( + &self, + server: &ServerName, + room_id: &RoomId, + ) -> Result; - /// Returns an iterator of all rooms a server participates in (as far as we know). + /// Returns an iterator of all rooms a server participates in (as far as we + /// know). fn server_rooms<'a>( &'a self, server: &ServerName, @@ -51,21 +66,23 @@ pub trait Data: Send + Sync { fn room_invited_count(&self, room_id: &RoomId) -> Result>; - /// Returns an iterator over all User IDs who ever joined a room. - fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> Box> + 'a>; - /// Returns an iterator over all invited members of a room. fn room_members_invited<'a>( &'a self, room_id: &RoomId, ) -> Box> + 'a>; - fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result>; + fn get_invite_count( + &self, + room_id: &RoomId, + user_id: &UserId, + ) -> Result>; - fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result>; + fn get_left_count( + &self, + room_id: &RoomId, + user_id: &UserId, + ) -> Result>; /// Returns an iterator over all rooms this user joined. 
fn rooms_joined<'a>( @@ -78,7 +95,11 @@ pub trait Data: Send + Sync { fn rooms_invited<'a>( &'a self, user_id: &UserId, - ) -> Box>)>> + 'a>; + ) -> Box< + dyn Iterator< + Item = Result<(OwnedRoomId, Vec>)>, + > + 'a, + >; fn invite_state( &self, @@ -97,7 +118,10 @@ pub trait Data: Send + Sync { fn rooms_left<'a>( &'a self, user_id: &UserId, - ) -> Box>)>> + 'a>; + ) -> Box< + dyn Iterator>)>> + + 'a, + >; fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result; @@ -105,5 +129,7 @@ pub trait Data: Send + Sync { fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result; + // TODO: Use this when implementing sync filtering + #[allow(dead_code)] fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result; } diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs deleted file mode 100644 index c108695d..00000000 --- a/src/service/rooms/state_cache/mod.rs +++ /dev/null @@ -1,353 +0,0 @@ -mod data; -use std::{collections::HashSet, sync::Arc}; - -pub use data::Data; - -use ruma::{ - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - room::{create::RoomCreateEventContent, member::MembershipState}, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, StateEventType, - }, - serde::Raw, - OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, -}; -use tracing::warn; - -use crate::{service::appservice::RegistrationInfo, services, Error, Result}; - -pub struct Service { - pub db: &'static dyn Data, -} - -impl Service { - /// Update current membership data. 
- #[tracing::instrument(skip(self, last_state))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != services().globals.server_name() { - services().users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.db.mark_as_once_joined(user_id, room_id)?; - - // Check if the room has a predecessor - if let Some(predecessor) = services() - .rooms - .state_accessor - .room_state_get(room_id, &StateEventType::RoomCreate, "")? - .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. - // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = services() - .account_data - .get( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? 
- .map(|event| { - serde_json::from_str(event.get()).map_err(|e| { - warn!("Invalid account data event in db: {e:?}"); - Error::BadDatabase("Invalid account data event in db.") - }) - }) - { - services() - .account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event?, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(direct_event) = services() - .account_data - .get( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? - .map(|event| { - serde_json::from_str::(event.get()).map_err(|e| { - warn!("Invalid account data event in db: {e:?}"); - Error::BadDatabase("Invalid account data event in db.") - }) - }) - { - let mut direct_event = direct_event?; - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - services().account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &serde_json::to_value(&direct_event) - .expect("to json always works"), - )?; - } - }; - } - } - - self.db.mark_as_joined(user_id, room_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = services() - .account_data - .get( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - )? - .map(|event| { - serde_json::from_str::(event.get()).map_err(|e| { - warn!("Invalid account data event in db: {e:?}"); - Error::BadDatabase("Invalid account data event in db.") - }) - }) - .transpose()? 
- .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|(user, _details)| user == sender) - }); - - if is_ignored { - return Ok(()); - } - - self.db.mark_as_invited(user_id, room_id, last_state)?; - } - MembershipState::Leave | MembershipState::Ban => { - self.db.mark_as_left(user_id, room_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id))] - pub fn update_joined_count(&self, room_id: &RoomId) -> Result<()> { - self.db.update_joined_count(room_id) - } - - #[tracing::instrument(skip(self, room_id))] - pub fn get_our_real_users(&self, room_id: &RoomId) -> Result>> { - self.db.get_our_real_users(room_id) - } - - #[tracing::instrument(skip(self, room_id, appservice))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &RegistrationInfo, - ) -> Result { - self.db.appservice_in_room(room_id, appservice) - } - - /// Makes a user forget a room. - #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - self.db.forget(room_id, user_id) - } - - /// Returns an iterator of all servers participating in this room. - #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator> + 'a { - self.db.room_servers(room_id) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - self.db.server_in_room(server, room_id) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). - #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator> + 'a { - self.db.server_rooms(server) - } - - /// Returns an iterator over all joined members of a room. 
- #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator> + 'a { - self.db.room_members(room_id) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.db.room_joined_count(room_id) - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.db.room_invited_count(room_id) - } - - /// Returns an iterator over all User IDs who ever joined a room. - #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator> + 'a { - self.db.room_useroncejoined(room_id) - } - - /// Returns an iterator over all invited members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator> + 'a { - self.db.room_members_invited(room_id) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - self.db.get_invite_count(room_id, user_id) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - self.db.get_left_count(room_id, user_id) - } - - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator> + 'a { - self.db.rooms_joined(user_id) - } - - /// Returns an iterator over all rooms a user was invited to. 
- #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>)>> + 'a { - self.db.rooms_invited(user_id) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - self.db.invite_state(user_id, room_id) - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - self.db.left_state(user_id, room_id) - } - - /// Returns an iterator over all rooms a user left. - #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>)>> + 'a { - self.db.rooms_left(user_id) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - self.db.once_joined(user_id, room_id) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - self.db.is_joined(user_id, room_id) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - self.db.is_invited(user_id, room_id) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result { - self.db.is_left(user_id, room_id) - } -} diff --git a/src/service/rooms/state_compressor.rs b/src/service/rooms/state_compressor.rs new file mode 100644 index 00000000..304cd9cb --- /dev/null +++ b/src/service/rooms/state_compressor.rs @@ -0,0 +1,381 @@ +use std::{ + array, + collections::HashSet, + mem::size_of, + sync::{Arc, Mutex}, +}; + +use lru_cache::LruCache; +use ruma::{EventId, RoomId}; + +use super::short::{ShortEventId, ShortStateHash, ShortStateKey}; +use crate::{ + observability::{FoundIn, Lookup, METRICS}, + services, utils, Result, +}; + +pub(crate) mod data; + +pub(crate) use data::Data; +use data::StateDiff; + +#[derive(Clone)] +pub(crate) struct 
CompressedStateLayer { + pub(crate) shortstatehash: ShortStateHash, + pub(crate) full_state: Arc>, + pub(crate) added: Arc>, + pub(crate) removed: Arc>, +} + +pub(crate) struct Service { + pub(crate) db: &'static dyn Data, + + #[allow(clippy::type_complexity)] + pub(crate) stateinfo_cache: + Option>>>, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub(crate) struct CompressedStateEvent { + pub(crate) state: ShortStateKey, + pub(crate) event: ShortEventId, +} + +impl CompressedStateEvent { + pub(crate) fn as_bytes( + &self, + ) -> [u8; size_of::() + size_of::()] { + let mut bytes = self + .state + .get() + .to_be_bytes() + .into_iter() + .chain(self.event.get().to_be_bytes()); + array::from_fn(|_| bytes.next().unwrap()) + } + + pub(crate) fn from_bytes( + bytes: [u8; size_of::() + size_of::()], + ) -> Self { + let state = ShortStateKey::new(u64::from_be_bytes( + bytes[0..8].try_into().unwrap(), + )); + let event = ShortEventId::new(u64::from_be_bytes( + bytes[8..16].try_into().unwrap(), + )); + + Self { + state, + event, + } + } +} + +impl Service { + pub(crate) fn new( + db: &'static dyn Data, + stateinfo_cache_size: usize, + ) -> Self { + Self { + db, + stateinfo_cache: (stateinfo_cache_size > 0) + .then(|| Mutex::new(LruCache::new(stateinfo_cache_size))), + } + } + + /// Returns a stack with info on shortstatehash, full state, added diff and + /// removed diff for the selected shortstatehash and each parent layer. 
+ #[allow(clippy::type_complexity)] + #[tracing::instrument(skip(self))] + pub(crate) fn load_shortstatehash_info( + &self, + shortstatehash: ShortStateHash, + ) -> Result> { + let lookup = Lookup::StateInfo; + + if let Some(cache) = &self.stateinfo_cache { + if let Some(r) = cache.lock().unwrap().get_mut(&shortstatehash) { + METRICS.record_lookup(lookup, FoundIn::Cache); + return Ok(r.clone()); + } + } + + let StateDiff { + parent, + added, + removed, + } = self.db.get_statediff(shortstatehash)?; + + let response = if let Some(parent) = parent { + let mut response = self.load_shortstatehash_info(parent)?; + let mut state = (*response.last().unwrap().full_state).clone(); + state.extend(added.iter().copied()); + let removed = (*removed).clone(); + for r in &removed { + state.remove(r); + } + + response.push(CompressedStateLayer { + shortstatehash, + full_state: Arc::new(state), + added, + removed: Arc::new(removed), + }); + response + } else { + vec![CompressedStateLayer { + shortstatehash, + full_state: added.clone(), + added, + removed, + }] + }; + + METRICS.record_lookup(lookup, FoundIn::Database); + + if let Some(cache) = &self.stateinfo_cache { + cache.lock().unwrap().insert(shortstatehash, response.clone()); + } + + Ok(response) + } + + // Allowed because this function uses `services()` + #[allow(clippy::unused_self)] + pub(crate) fn compress_state_event( + &self, + shortstatekey: ShortStateKey, + event_id: &EventId, + ) -> Result { + Ok(CompressedStateEvent { + state: shortstatekey, + event: services() + .rooms + .short + .get_or_create_shorteventid(event_id)?, + }) + } + + /// Returns shortstatekey, event id + // Allowed because this function uses `services()` + #[allow(clippy::unused_self)] + pub(crate) fn parse_compressed_state_event( + &self, + compressed_event: &CompressedStateEvent, + ) -> Result<(ShortStateKey, Arc)> { + Ok(( + compressed_event.state, + services() + .rooms + .short + .get_eventid_from_short(compressed_event.event)?, + )) + } + + /// 
Creates a new shortstatehash that often is just a diff to an already + /// existing shortstatehash and therefore very efficient. + /// + /// There are multiple layers of diffs. The bottom layer 0 always contains + /// the full state. Layer 1 contains diffs to states of layer 0, layer 2 + /// diffs to layer 1 and so on. If layer n > 0 grows too big, it will be + /// combined with layer n-1 to create a new diff on layer n-1 that's + /// based on layer n-2. If that layer is also too big, it will recursively + /// fix above layers too. + /// + /// * `shortstatehash` - Shortstatehash of this state + /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid + /// * `statediffremoved` - Removed from base. Each vec is + /// shortstatekey+shorteventid + /// * `diff_to_sibling` - Approximately how much the diff grows each time + /// for this layer + /// * `parent_states` - A stack with info on shortstatehash, full state, + /// added diff and removed diff for each parent layer + #[allow(clippy::type_complexity)] + #[tracing::instrument(skip( + self, + statediffnew, + statediffremoved, + diff_to_sibling, + parent_states + ))] + pub(crate) fn save_state_from_diff( + &self, + shortstatehash: ShortStateHash, + statediffnew: Arc>, + statediffremoved: Arc>, + diff_to_sibling: usize, + mut parent_states: Vec, + ) -> Result<()> { + let diffsum = statediffnew.len() + statediffremoved.len(); + + if parent_states.len() > 3 { + // Number of layers + // To many layers, we have to go deeper + let parent = parent_states.pop().unwrap(); + + let mut parent_new = (*parent.added).clone(); + let mut parent_removed = (*parent.removed).clone(); + + for removed in statediffremoved.iter() { + if !parent_new.remove(removed) { + // It was not added in the parent and we removed it + parent_removed.insert(*removed); + } + // Else it was added in the parent and we removed it again. 
We + // can forget this change + } + + for new in statediffnew.iter() { + if !parent_removed.remove(new) { + // It was not touched in the parent and we added it + parent_new.insert(*new); + } + // Else it was removed in the parent and we added it again. We + // can forget this change + } + + self.save_state_from_diff( + shortstatehash, + Arc::new(parent_new), + Arc::new(parent_removed), + diffsum, + parent_states, + )?; + + return Ok(()); + } + + if parent_states.is_empty() { + // There is no parent layer, create a new state + self.db.save_statediff( + shortstatehash, + StateDiff { + parent: None, + added: statediffnew, + removed: statediffremoved, + }, + )?; + + return Ok(()); + } + + // Else we have two options. + // 1. We add the current diff on top of the parent layer. + // 2. We replace a layer above + + let parent = parent_states.pop().unwrap(); + let parent_diff = parent.added.len() + parent.removed.len(); + + if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { + // Diff too big, we replace above layer(s) + let mut parent_new = (*parent.added).clone(); + let mut parent_removed = (*parent.removed).clone(); + + for removed in statediffremoved.iter() { + if !parent_new.remove(removed) { + // It was not added in the parent and we removed it + parent_removed.insert(*removed); + } + // Else it was added in the parent and we removed it again. We + // can forget this change + } + + for new in statediffnew.iter() { + if !parent_removed.remove(new) { + // It was not touched in the parent and we added it + parent_new.insert(*new); + } + // Else it was removed in the parent and we added it again. 
We + // can forget this change + } + + self.save_state_from_diff( + shortstatehash, + Arc::new(parent_new), + Arc::new(parent_removed), + diffsum, + parent_states, + )?; + } else { + // Diff small enough, we add diff as layer on top of parent + self.db.save_statediff( + shortstatehash, + StateDiff { + parent: Some(parent.shortstatehash), + added: statediffnew, + removed: statediffremoved, + }, + )?; + } + + Ok(()) + } + + /// Returns the new shortstatehash, and the state diff from the previous + /// room state + #[allow(clippy::type_complexity)] + #[tracing::instrument(skip(self, new_state_ids_compressed))] + pub(crate) fn save_state( + &self, + room_id: &RoomId, + new_state_ids_compressed: Arc>, + ) -> Result<( + ShortStateHash, + Arc>, + Arc>, + )> { + let previous_shortstatehash = + services().rooms.state.get_room_shortstatehash(room_id)?; + + let state_hash = utils::calculate_hash( + new_state_ids_compressed.iter().map(CompressedStateEvent::as_bytes), + ); + + let (new_shortstatehash, already_existed) = + services().rooms.short.get_or_create_shortstatehash(&state_hash)?; + + if Some(new_shortstatehash) == previous_shortstatehash { + return Ok(( + new_shortstatehash, + Arc::new(HashSet::new()), + Arc::new(HashSet::new()), + )); + } + + let states_parents = previous_shortstatehash.map_or_else( + || Ok(Vec::new()), + |p| self.load_shortstatehash_info(p), + )?; + + let (statediffnew, statediffremoved) = + if let Some(parent_stateinfo) = states_parents.last() { + let statediffnew: HashSet<_> = new_state_ids_compressed + .difference(&parent_stateinfo.full_state) + .copied() + .collect(); + + let statediffremoved: HashSet<_> = parent_stateinfo + .full_state + .difference(&new_state_ids_compressed) + .copied() + .collect(); + + (Arc::new(statediffnew), Arc::new(statediffremoved)) + } else { + (new_state_ids_compressed, Arc::new(HashSet::new())) + }; + + if !already_existed { + self.save_state_from_diff( + new_shortstatehash, + statediffnew.clone(), + 
statediffremoved.clone(), + // every state change is 2 event changes on average + 2, + states_parents, + )?; + } + + Ok((new_shortstatehash, statediffnew, statediffremoved)) + } +} diff --git a/src/service/rooms/state_compressor/data.rs b/src/service/rooms/state_compressor/data.rs index d221d576..aafaf009 100644 --- a/src/service/rooms/state_compressor/data.rs +++ b/src/service/rooms/state_compressor/data.rs @@ -1,15 +1,22 @@ use std::{collections::HashSet, sync::Arc}; use super::CompressedStateEvent; -use crate::Result; +use crate::{service::rooms::short::ShortStateHash, Result}; -pub struct StateDiff { - pub parent: Option, - pub added: Arc>, - pub removed: Arc>, +pub(crate) struct StateDiff { + pub(crate) parent: Option, + pub(crate) added: Arc>, + pub(crate) removed: Arc>, } -pub trait Data: Send + Sync { - fn get_statediff(&self, shortstatehash: u64) -> Result; - fn save_statediff(&self, shortstatehash: u64, diff: StateDiff) -> Result<()>; +pub(crate) trait Data: Send + Sync { + fn get_statediff( + &self, + shortstatehash: ShortStateHash, + ) -> Result; + fn save_statediff( + &self, + shortstatehash: ShortStateHash, + diff: StateDiff, + ) -> Result<()>; } diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs deleted file mode 100644 index 6118e06b..00000000 --- a/src/service/rooms/state_compressor/mod.rs +++ /dev/null @@ -1,324 +0,0 @@ -pub mod data; -use std::{ - collections::HashSet, - mem::size_of, - sync::{Arc, Mutex}, -}; - -pub use data::Data; -use lru_cache::LruCache; -use ruma::{EventId, RoomId}; - -use crate::{services, utils, Result}; - -use self::data::StateDiff; - -pub struct Service { - pub db: &'static dyn Data, - - #[allow(clippy::type_complexity)] - pub stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - Arc>, // full state - Arc>, // added - Arc>, // removed - )>, - >, - >, -} - -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -impl Service { - /// Returns a stack with 
info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[allow(clippy::type_complexity)] - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - Arc>, // full state - Arc>, // added - Arc>, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let StateDiff { - parent, - added, - removed, - } = self.db.get_statediff(shortstatehash)?; - - if let Some(parent) = parent { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = (*response.last().unwrap().1).clone(); - state.extend(added.iter().copied()); - let removed = (*removed).clone(); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, Arc::new(state), added, Arc::new(removed))); - - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &services() - .rooms - .short - .get_or_create_shorteventid(event_id)? 
- .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: &CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - services().rooms.short.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. 
Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[allow(clippy::type_complexity)] - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: Arc>, - statediffremoved: Arc>, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - Arc>, // full state - Arc>, // added - Arc>, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = (*parent.2).clone(); - let mut parent_removed = (*parent.3).clone(); - - for removed in statediffremoved.iter() { - if !parent_new.remove(removed) { - // It was not added in the parent and we removed it - parent_removed.insert(*removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew.iter() { - if !parent_removed.remove(new) { - // It was not touched in the parent and we added it - parent_new.insert(*new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - Arc::new(parent_new), - Arc::new(parent_removed), - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - self.db.save_statediff( - shortstatehash, - StateDiff { - parent: None, - added: statediffnew, - removed: statediffremoved, - }, - )?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. 
We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = (*parent.2).clone(); - let mut parent_removed = (*parent.3).clone(); - - for removed in statediffremoved.iter() { - if !parent_new.remove(removed) { - // It was not added in the parent and we removed it - parent_removed.insert(*removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew.iter() { - if !parent_removed.remove(new) { - // It was not touched in the parent and we added it - parent_new.insert(*new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - Arc::new(parent_new), - Arc::new(parent_removed), - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - self.db.save_statediff( - shortstatehash, - StateDiff { - parent: Some(parent.0), - added: statediffnew, - removed: statediffremoved, - }, - )?; - } - - Ok(()) - } - - /// Returns the new shortstatehash, and the state diff from the previous room state - #[allow(clippy::type_complexity)] - pub fn save_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: Arc>, - ) -> Result<( - u64, - Arc>, - Arc>, - )> { - let previous_shortstatehash = services().rooms.state.get_room_shortstatehash(room_id)?; - - let state_hash = utils::calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = services() - .rooms - .short - .get_or_create_shortstatehash(&state_hash)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(( - new_shortstatehash, - Arc::new(HashSet::new()), - Arc::new(HashSet::new()), - )); - } - - let states_parents 
= previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (Arc::new(statediffnew), Arc::new(statediffremoved)) - } else { - (new_state_ids_compressed, Arc::new(HashSet::new())) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved.clone(), - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - Ok((new_shortstatehash, statediffnew, statediffremoved)) - } -} diff --git a/src/service/rooms/threads/mod.rs b/src/service/rooms/threads.rs similarity index 56% rename from src/service/rooms/threads/mod.rs rename to src/service/rooms/threads.rs index c6193bc8..5acfdec8 100644 --- a/src/service/rooms/threads/mod.rs +++ b/src/service/rooms/threads.rs @@ -1,22 +1,22 @@ -mod data; - -pub use data::Data; use ruma::{ api::client::{error::ErrorKind, threads::get_threads::v1::IncludeThreads}, events::relation::BundledThread, - uint, CanonicalJsonValue, EventId, RoomId, UserId, + uint, CanonicalJsonObject, CanonicalJsonValue, EventId, RoomId, UserId, }; - use serde_json::json; use crate::{services, Error, PduEvent, Result}; -pub struct Service { - pub db: &'static dyn Data, +mod data; + +pub(crate) use data::Data; + +pub(crate) struct Service { + pub(crate) db: &'static dyn Data, } impl Service { - pub fn threads_until<'a>( + pub(crate) fn threads_until<'a>( &'a self, user_id: &'a UserId, room_id: &'a RoomId, @@ -26,51 +26,64 @@ impl Service { self.db.threads_until(user_id, room_id, until, include) } - pub fn add_to_thread(&self, root_event_id: &EventId, pdu: &PduEvent) -> Result<()> { - let 
root_id = &services() - .rooms - .timeline - .get_pdu_id(root_event_id)? - .ok_or_else(|| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid event id in thread message", - ) - })?; + pub(crate) fn add_to_thread( + &self, + root_event_id: &EventId, + pdu: &PduEvent, + ) -> Result<()> { + let root_id = + &services().rooms.timeline.get_pdu_id(root_event_id)?.ok_or_else( + || { + Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid event id in thread message", + ) + }, + )?; - let root_pdu = services() - .rooms - .timeline - .get_pdu_from_id(root_id)? - .ok_or_else(|| { - Error::BadRequest(ErrorKind::InvalidParam, "Thread root pdu not found") - })?; + let root_pdu = + services().rooms.timeline.get_pdu_from_id(root_id)?.ok_or_else( + || { + Error::BadRequest( + ErrorKind::InvalidParam, + "Thread root pdu not found", + ) + }, + )?; let mut root_pdu_json = services() .rooms .timeline .get_pdu_json_from_id(root_id)? .ok_or_else(|| { - Error::BadRequest(ErrorKind::InvalidParam, "Thread root pdu not found") + Error::BadRequest( + ErrorKind::InvalidParam, + "Thread root pdu not found", + ) })?; - if let CanonicalJsonValue::Object(unsigned) = root_pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) + if let CanonicalJsonValue::Object(unsigned) = + root_pdu_json.entry("unsigned".to_owned()).or_insert_with(|| { + CanonicalJsonValue::Object(CanonicalJsonObject::default()) + }) { if let Some(mut relations) = unsigned .get("m.relations") .and_then(|r| r.as_object()) .and_then(|r| r.get("m.thread")) .and_then(|relations| { - serde_json::from_value::(relations.clone().into()).ok() + serde_json::from_value::( + relations.clone().into(), + ) + .ok() }) { // Thread already existed relations.count += uint!(1); relations.latest_event = pdu.to_message_like_event(); - let content = serde_json::to_value(relations).expect("to_value always works"); + let content = serde_json::to_value(relations) + .expect("to_value always 
works"); unsigned.insert( "m.relations".to_owned(), @@ -86,7 +99,8 @@ impl Service { current_user_participated: true, }; - let content = serde_json::to_value(relations).expect("to_value always works"); + let content = serde_json::to_value(relations) + .expect("to_value always works"); unsigned.insert( "m.relations".to_owned(), @@ -96,10 +110,11 @@ impl Service { ); } - services() - .rooms - .timeline - .replace_pdu(root_id, &root_pdu_json, &root_pdu)?; + services().rooms.timeline.replace_pdu( + root_id, + &root_pdu_json, + &root_pdu, + )?; } let mut users = Vec::new(); diff --git a/src/service/rooms/threads/data.rs b/src/service/rooms/threads/data.rs index e7159de0..384c23c8 100644 --- a/src/service/rooms/threads/data.rs +++ b/src/service/rooms/threads/data.rs @@ -1,7 +1,11 @@ -use crate::{PduEvent, Result}; -use ruma::{api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, UserId}; +use ruma::{ + api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, + UserId, +}; -pub trait Data: Send + Sync { +use crate::{service::rooms::timeline::PduId, PduEvent, Result}; + +pub(crate) trait Data: Send + Sync { #[allow(clippy::type_complexity)] fn threads_until<'a>( &'a self, @@ -11,6 +15,13 @@ pub trait Data: Send + Sync { include: &'a IncludeThreads, ) -> Result> + 'a>>; - fn update_participants(&self, root_id: &[u8], participants: &[OwnedUserId]) -> Result<()>; - fn get_participants(&self, root_id: &[u8]) -> Result>>; + fn update_participants( + &self, + root_id: &PduId, + participants: &[OwnedUserId], + ) -> Result<()>; + fn get_participants( + &self, + root_id: &PduId, + ) -> Result>>; } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline.rs similarity index 53% rename from src/service/rooms/timeline/mod.rs rename to src/service/rooms/timeline.rs index acb00d01..bffd52ca 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline.rs @@ -1,70 +1,93 @@ -mod data; - use std::{ cmp::Ordering, 
- collections::{BTreeMap, HashMap, HashSet}, - sync::Arc, + collections::{hash_map, BTreeMap, HashMap, HashSet}, + sync::{Arc, Mutex}, }; -pub use data::Data; - +use lru_cache::LruCache; use ruma::{ api::{client::error::ErrorKind, federation}, canonical_json::to_canonical_value, events::{ - push_rules::PushRulesEvent, + push_rules::PushRulesEventContent, room::{ - create::RoomCreateEventContent, encrypted::Relation, member::MembershipState, - power_levels::RoomPowerLevelsEventContent, redaction::RoomRedactionEventContent, + create::RoomCreateEventContent, encrypted::Relation, + member::MembershipState, message::RoomMessageEventContent, + power_levels::RoomPowerLevelsEventContent, }, - GlobalAccountDataEventType, StateEventType, TimelineEventType, + StateEventType, TimelineEventType, }, push::{Action, Ruleset, Tweak}, - serde::Base64, - state_res::{self, Event, RoomVersion}, - uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, - OwnedServerName, RoomId, RoomVersionId, ServerName, UserId, + state_res::{self, Event}, + uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, + OwnedEventId, OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, }; use serde::Deserialize; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; -use tokio::sync::{Mutex, MutexGuard, RwLock}; +use tokio::sync::RwLock; use tracing::{error, info, warn}; +use super::{short::ShortRoomId, state_compressor::CompressedStateEvent}; use crate::{ api::server_server, + observability::{FoundIn, Lookup, METRICS}, service::{ appservice::NamespaceRegex, + globals::{marker, SigningKeys}, pdu::{EventHash, PduBuilder}, + rooms::state::ExtractVersion, }, - services, utils, Error, PduEvent, Result, + services, + utils::{self, on_demand_hashmap::KeyToken, room_version::RoomVersion}, + Error, PduEvent, Result, }; -use super::state_compressor::CompressedStateEvent; +mod data; + +pub(crate) use data::Data; + +#[derive(Debug, Clone, PartialEq, Eq, 
Hash)] +pub(crate) struct PduId { + inner: Vec, +} + +impl PduId { + pub(crate) fn new(inner: Vec) -> Self { + Self { + inner, + } + } + + pub(crate) fn as_bytes(&self) -> &[u8] { + &self.inner + } +} #[derive(Hash, PartialEq, Eq, Clone, Copy, Debug)] -pub enum PduCount { +pub(crate) enum PduCount { Backfilled(u64), Normal(u64), } impl PduCount { - pub fn min() -> Self { - Self::Backfilled(u64::MAX) - } - pub fn max() -> Self { - Self::Normal(u64::MAX) - } + pub(crate) const MAX: Self = Self::Normal(u64::MAX); + pub(crate) const MIN: Self = Self::Backfilled(u64::MAX); - pub fn try_from_string(token: &str) -> Result { + pub(crate) fn try_from_string(token: &str) -> Result { if let Some(stripped) = token.strip_prefix('-') { stripped.parse().map(PduCount::Backfilled) } else { token.parse().map(PduCount::Normal) } - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid pagination token.")) + .map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid pagination token.", + ) + }) } - pub fn stringify(&self) -> String { + pub(crate) fn stringify(&self) -> String { match self { PduCount::Backfilled(x) => format!("-{x}"), PduCount::Normal(x) => x.to_string(), @@ -89,61 +112,92 @@ impl Ord for PduCount { } } -pub struct Service { - pub db: &'static dyn Data, - - pub lasttimelinecount_cache: Mutex>, +pub(crate) struct Service { + db: &'static dyn Data, + pdu_cache: Mutex>>, + lasttimelinecount_cache: Mutex>, } impl Service { + pub(crate) fn new( + db: &'static dyn Data, + pdu_cache_capacity: usize, + ) -> Self { + Self { + db, + pdu_cache: Mutex::new(LruCache::new(pdu_cache_capacity)), + lasttimelinecount_cache: Mutex::new(HashMap::new()), + } + } + #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - self.all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id)? 
+ pub(crate) fn first_pdu_in_room( + &self, + room_id: &RoomId, + ) -> Result>> { + self.all_pdus(user_id!("@doesntmatter:grapevine"), room_id)? .next() .map(|o| o.map(|(_, p)| Arc::new(p))) .transpose() } #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - self.db.last_timeline_count(sender_user, room_id) + pub(crate) fn last_timeline_count( + &self, + sender_user: &UserId, + room_id: &RoomId, + ) -> Result { + let lookup = Lookup::LastTimelineCount; + + match self + .lasttimelinecount_cache + .lock() + .unwrap() + .entry(room_id.to_owned()) + { + hash_map::Entry::Vacant(v) => { + if let Some(last_count) = self + .pdus_until(sender_user, room_id, PduCount::MAX)? + .find_map(|x| match x { + Ok(x) => Some(x), + Err(error) => { + error!(%error, "Bad pdu in pdus_since"); + None + } + }) + { + METRICS.record_lookup(lookup, FoundIn::Database); + Ok(*v.insert(last_count.0)) + } else { + METRICS.record_lookup(lookup, FoundIn::Nothing); + Ok(PduCount::Normal(0)) + } + } + hash_map::Entry::Occupied(o) => { + METRICS.record_lookup(lookup, FoundIn::Cache); + Ok(*o.get()) + } + } } /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { + pub(crate) fn get_pdu_count( + &self, + event_id: &EventId, + ) -> Result> { self.db.get_pdu_count(event_id) } - // TODO Is this the same as the function above? - /* - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - */ - /// Returns the json of a pdu. 
- pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { + pub(crate) fn get_pdu_json( + &self, + event_id: &EventId, + ) -> Result> { self.db.get_pdu_json(event_id) } /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( + pub(crate) fn get_non_outlier_pdu_json( &self, event_id: &EventId, ) -> Result> { @@ -151,74 +205,103 @@ impl Service { } /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { + pub(crate) fn get_pdu_id( + &self, + event_id: &EventId, + ) -> Result> { self.db.get_pdu_id(event_id) } /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.db.get_non_outlier_pdu(event_id) - } + pub(crate) fn get_pdu( + &self, + event_id: &EventId, + ) -> Result>> { + let lookup = Lookup::Pdu; - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - self.db.get_pdu(event_id) + if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { + METRICS.record_lookup(lookup, FoundIn::Cache); + return Ok(Some(Arc::clone(p))); + } + + if let Some(pdu) = self.db.get_pdu(event_id)? { + METRICS.record_lookup(lookup, FoundIn::Database); + self.pdu_cache + .lock() + .unwrap() + .insert(event_id.to_owned(), Arc::clone(&pdu)); + Ok(Some(pdu)) + } else { + METRICS.record_lookup(lookup, FoundIn::Nothing); + Ok(None) + } } /// Returns the pdu. /// /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { + pub(crate) fn get_pdu_from_id( + &self, + pdu_id: &PduId, + ) -> Result> { self.db.get_pdu_from_id(pdu_id) } /// Returns the pdu as a `BTreeMap`. 
- pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { + pub(crate) fn get_pdu_json_from_id( + &self, + pdu_id: &PduId, + ) -> Result> { self.db.get_pdu_json_from_id(pdu_id) } /// Removes a pdu and creates a new one with the same id. #[tracing::instrument(skip(self))] - pub fn replace_pdu( + pub(crate) fn replace_pdu( &self, - pdu_id: &[u8], + pdu_id: &PduId, pdu_json: &CanonicalJsonObject, pdu: &PduEvent, ) -> Result<()> { - self.db.replace_pdu(pdu_id, pdu_json, pdu) + self.db.replace_pdu(pdu_id, pdu_json)?; + self.pdu_cache.lock().unwrap().remove(&(*pdu.event_id).to_owned()); + Ok(()) } /// Creates a new persisted data unit and adds it to a room. /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. + /// By this point the incoming event should be fully authenticated, no auth + /// happens in `append_pdu`. /// /// Returns pdu id + #[allow(clippy::too_many_lines)] #[tracing::instrument(skip(self, pdu, pdu_json, leaves))] - pub async fn append_pdu<'a>( + pub(crate) async fn append_pdu( &self, pdu: &PduEvent, mut pdu_json: CanonicalJsonObject, leaves: Vec, - state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> Result> { + room_id: &KeyToken, + ) -> Result { + assert_eq!(*pdu.room_id, **room_id, "Token for incorrect room passed"); + let shortroomid = services() .rooms .short .get_shortroomid(&pdu.room_id)? .expect("room exists"); - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes + // Make unsigned fields correct. 
This is not properly documented in the + // spec, but state events need to have previous content in the + // unsigned field, so clients can easily interpret things like + // membership changes if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) + if let CanonicalJsonValue::Object(unsigned) = + pdu_json.entry("unsigned".to_owned()).or_insert_with(|| { + CanonicalJsonValue::Object(CanonicalJsonObject::default()) + }) { if let Some(shortstatehash) = services() .rooms @@ -229,20 +312,26 @@ impl Service { if let Some(prev_state) = services() .rooms .state_accessor - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) + .state_get( + shortstatehash, + &pdu.kind.to_string().into(), + state_key, + ) .unwrap() { unsigned.insert( "prev_content".to_owned(), CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), + utils::to_canonical_object( + prev_state.content.clone(), + ) + .expect("event is valid, we just created it"), ), ); } } } else { - error!("Invalid unsigned type in pdu."); + error!("Invalid unsigned type in pdu"); } } @@ -251,43 +340,40 @@ impl Service { .rooms .pdu_metadata .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - services() - .rooms - .state - .set_forward_extremities(&pdu.room_id, leaves, state_lock)?; + services().rooms.state.set_forward_extremities(room_id, leaves)?; - let mutex_insert = Arc::clone( - services() - .globals - .roomid_mutex_insert - .write() - .await - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().await; + let insert_token = services() + .globals + .roomid_mutex_insert + .lock_key(pdu.room_id.clone()) + .await; let count1 = services().globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - 
// fails - services() - .rooms - .edus - .read_receipt - .private_read_set(&pdu.room_id, &pdu.sender, count1)?; + // Mark as read first so the sending client doesn't get a notification + // even if appending fails + services().rooms.edus.read_receipt.private_read_set( + &pdu.room_id, + &pdu.sender, + count1, + )?; services() .rooms .user .reset_notification_counts(&pdu.sender, &pdu.room_id)?; let count2 = services().globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); + let mut pdu_id = shortroomid.get().to_be_bytes().to_vec(); pdu_id.extend_from_slice(&count2.to_be_bytes()); + let pdu_id = PduId::new(pdu_id); // Insert pdu - self.db.append_pdu(&pdu_id, pdu, &pdu_json, count2)?; + self.db.append_pdu(&pdu_id, pdu, &pdu_json)?; + self.lasttimelinecount_cache + .lock() + .unwrap() + .insert(pdu.room_id.clone(), PduCount::Normal(count2)); - drop(insert_lock); + drop(insert_token); // See if the event matches any known pushers let power_levels: RoomPowerLevelsEventContent = services() @@ -295,8 +381,9 @@ impl Service { .state_accessor .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) + serde_json::from_str(ev.content.get()).map_err(|_| { + Error::bad_database("invalid m.room.power_levels event") + }) }) .transpose()? .unwrap_or_default(); @@ -306,10 +393,8 @@ impl Service { let mut notifies = Vec::new(); let mut highlights = Vec::new(); - let mut push_target = services() - .rooms - .state_cache - .get_our_real_users(&pdu.room_id)?; + let mut push_target = + services().rooms.state_cache.get_our_real_users(&pdu.room_id)?; if pdu.kind == TimelineEventType::RoomMember { if let Some(state_key) = &pdu.state_key { @@ -332,18 +417,14 @@ impl Service { let rules_for_user = services() .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? + .get_global::(user)? 
.map(|event| { - serde_json::from_str::(event.get()) - .map_err(|_| Error::bad_database("Invalid push rules event in db.")) + event.deserialize().map_err(|_| { + Error::bad_database("Invalid push rules event in db.") + }) }) .transpose()? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); + .map_or_else(|| Ruleset::server_default(user), |ev| ev.global); let mut highlight = false; let mut notify = false; @@ -361,7 +442,7 @@ impl Service { highlight = true; } _ => {} - }; + } } if notify { @@ -377,63 +458,37 @@ impl Service { } } - self.db - .increment_notification_counts(&pdu.room_id, notifies, highlights)?; + self.db.increment_notification_counts( + &pdu.room_id, + notifies, + highlights, + )?; match pdu.kind { TimelineEventType::RoomRedaction => { - let room_version_id = services().rooms.state.get_room_version(&pdu.room_id)?; - match room_version_id { - RoomVersionId::V1 - | RoomVersionId::V2 - | RoomVersionId::V3 - | RoomVersionId::V4 - | RoomVersionId::V5 - | RoomVersionId::V6 - | RoomVersionId::V7 - | RoomVersionId::V8 - | RoomVersionId::V9 - | RoomVersionId::V10 => { - if let Some(redact_id) = &pdu.redacts { - if services().rooms.state_accessor.user_can_redact( - redact_id, - &pdu.sender, - &pdu.room_id, - false, - )? { - self.redact_pdu(redact_id, pdu)?; - } - } + let room_version_id = services() + .rooms + .state + .get_create_content::(&pdu.room_id)?; + let room_version = RoomVersion::try_from(&room_version_id)?; + + if let Some(redact_id) = pdu.redacts(&room_version) { + if services() + .rooms + .state_accessor + .redaction_event_allowed(pdu)? 
+ { + self.redact_pdu(&redact_id, pdu, shortroomid)?; } - RoomVersionId::V11 => { - let content = - serde_json::from_str::(pdu.content.get()) - .map_err(|_| { - Error::bad_database("Invalid content in redaction pdu.") - })?; - if let Some(redact_id) = &content.redacts { - if services().rooms.state_accessor.user_can_redact( - redact_id, - &pdu.sender, - &pdu.room_id, - false, - )? { - self.redact_pdu(redact_id, pdu)?; - } - } - } - _ => unreachable!("Validity of room version already checked"), - }; + } } TimelineEventType::SpaceChild => { - if let Some(_state_key) = &pdu.state_key { + if pdu.state_key.is_some() { services() .rooms .spaces - .roomid_spacechunk_cache - .lock() - .await - .remove(&pdu.room_id); + .invalidate_cache(&pdu.room_id) + .await; } } TimelineEventType::RoomMember => { @@ -441,29 +496,66 @@ impl Service { #[derive(Deserialize)] struct ExtractMembership { membership: MembershipState, + reason: Option, } // if the state_key fails let target_user_id = UserId::parse(state_key.clone()) .expect("This state_key was previously validated"); - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; + let ExtractMembership { + membership, + reason, + } = serde_json::from_str(pdu.content.get()).map_err( + |_| Error::bad_database("Invalid content in pdu."), + )?; - let invite_state = match content.membership { + let invite_state = match membership { MembershipState::Invite => { - let state = services().rooms.state.calculate_invite_state(pdu)?; + let state = services() + .rooms + .state + .get_helpful_invite_events(pdu)?; Some(state) } _ => None, }; - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth + if membership == MembershipState::Ban { + let (room, user) = (&pdu.room_id, &target_user_id); + + if user.server_name() + == services().globals.server_name() + { + info!( + %user, + %room, + 
reason, + "User has been banned from room" + ); + + let reason = match reason.filter(|s| !s.is_empty()) + { + Some(s) => format!(": {s}"), + None => String::new(), + }; + + services().admin.send_message( + RoomMessageEventContent::notice_plain(format!( + "User {user} has been banned from room \ + {room}{reason}", + )), + ); + } + } + + // Update our membership info, we do this here incase a user + // is invited and immediately leaves we + // need the DB to record the invite event for auth services().rooms.state_cache.update_membership( &pdu.room_id, &target_user_id, - content.membership, + &membership, &pdu.sender, invite_state, true, @@ -476,29 +568,47 @@ impl Service { body: Option, } - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; + let content = + serde_json::from_str::(pdu.content.get()) + .map_err(|_| { + Error::bad_database("Invalid content in pdu.") + })?; if let Some(body) = content.body { - services() - .rooms - .search - .index_pdu(shortroomid, &pdu_id, &body)?; + services().rooms.search.index_pdu( + shortroomid, + &pdu_id, + &body, + )?; - let server_user = format!("@conduit:{}", services().globals.server_name()); + let admin_bot = &services().globals.admin_bot_user_id; - let to_conduit = body.starts_with(&format!("{server_user}: ")) - || body.starts_with(&format!("{server_user} ")) - || body == format!("{server_user}:") - || body == server_user; + let to_admin_bot = body + .starts_with(&format!("{admin_bot}: ")) + || body.starts_with(&format!("{admin_bot} ")) + || body == format!("{admin_bot}:") + || body == admin_bot.as_str() + || body.starts_with("!admin ") + || body == "!admin"; - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = pdu.sender == server_user + // This will evaluate to false if the emergency password + // is set up so that the administrator can execute commands + 
// as the admin bot + let from_admin_bot = &pdu.sender == admin_bot && services().globals.emergency_password().is_none(); - if let Some(admin_room) = services().admin.get_admin_room()? { - if to_conduit && !from_conduit && admin_room == pdu.room_id { + if let Some(admin_room) = + services().admin.get_admin_room()? + { + if to_admin_bot + && !from_admin_bot + && admin_room == pdu.room_id + && services() + .rooms + .state_cache + .is_joined(admin_bot, &admin_room) + .unwrap_or(false) + { services().admin.process_message(body); } } @@ -508,27 +618,12 @@ impl Service { } // Update Relationships - #[derive(Deserialize)] - struct ExtractRelatesTo { - #[serde(rename = "m.relates_to")] - relates_to: Relation, - } - #[derive(Clone, Debug, Deserialize)] - struct ExtractEventId { - event_id: OwnedEventId, - } - #[derive(Clone, Debug, Deserialize)] - struct ExtractRelatesToEventId { - #[serde(rename = "m.relates_to")] - relates_to: ExtractEventId, - } - - if let Ok(content) = serde_json::from_str::(pdu.content.get()) { - if let Some(related_pducount) = services() - .rooms - .timeline - .get_pdu_count(&content.relates_to.event_id)? + if let Ok(content) = + serde_json::from_str::(pdu.content.get()) + { + if let Some(related_pducount) = + self.get_pdu_count(&content.relates_to.event_id)? { services() .rooms @@ -537,20 +632,22 @@ impl Service { } } - if let Ok(content) = serde_json::from_str::(pdu.content.get()) { + if let Ok(content) = + serde_json::from_str::(pdu.content.get()) + { match content.relates_to { - Relation::Reply { in_reply_to } => { + Relation::Reply { + in_reply_to, + } => { // We need to do it again here, because replies don't have // event_id as a top level field - if let Some(related_pducount) = services() - .rooms - .timeline - .get_pdu_count(&in_reply_to.event_id)? + if let Some(related_pducount) = + self.get_pdu_count(&in_reply_to.event_id)? 
{ - services() - .rooms - .pdu_metadata - .add_relation(PduCount::Normal(count2), related_pducount)?; + services().rooms.pdu_metadata.add_relation( + PduCount::Normal(count2), + related_pducount, + )?; } } Relation::Thread(thread) => { @@ -559,7 +656,8 @@ impl Service { .threads .add_to_thread(&thread.event_id, pdu)?; } - _ => {} // TODO: Aggregate other types + // TODO: Aggregate other types + _ => {} } } @@ -569,21 +667,24 @@ impl Service { .state_cache .appservice_in_room(&pdu.room_id, appservice)? { - services() - .sending - .send_pdu_appservice(appservice.registration.id.clone(), pdu_id.clone())?; + services().sending.send_pdu_appservice( + appservice.registration.id.clone(), + pdu_id.clone(), + )?; continue; } - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. + // If the RoomMember event has a non-empty state_key, it is targeted + // at someone. If it is our appservice user, we send + // this PDU to it. 
if pdu.kind == TimelineEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) + if let Some(state_key_uid) = + &pdu.state_key.as_ref().and_then(|state_key| { + UserId::parse(state_key.as_str()).ok() + }) { - let appservice_uid = appservice.registration.sender_localpart.as_str(); + let appservice_uid = + appservice.registration.sender_localpart.as_str(); if state_key_uid == appservice_uid { services().sending.send_pdu_appservice( appservice.registration.id.clone(), @@ -600,14 +701,14 @@ impl Service { && pdu .state_key .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) + .is_some_and(|state_key| users.is_match(state_key)) }; let matching_aliases = |aliases: &NamespaceRegex| { services() .rooms .alias .local_aliases_for_room(&pdu.room_id) - .filter_map(|r| r.ok()) + .filter_map(Result::ok) .any(|room_alias| aliases.is_match(room_alias.as_str())) }; @@ -615,21 +716,22 @@ impl Service { || appservice.rooms.is_match(pdu.room_id.as_str()) || matching_users(&appservice.users) { - services() - .sending - .send_pdu_appservice(appservice.registration.id.clone(), pdu_id.clone())?; + services().sending.send_pdu_appservice( + appservice.registration.id.clone(), + pdu_id.clone(), + )?; } } Ok(pdu_id) } - pub fn create_hash_and_sign_event( + #[allow(clippy::too_many_lines)] + pub(crate) fn create_hash_and_sign_event( &self, pdu_builder: PduBuilder, sender: &UserId, - room_id: &RoomId, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + room_id: &KeyToken, ) -> Result<(PduEvent, CanonicalJsonObject)> { let PduBuilder { event_type, @@ -651,24 +753,30 @@ impl Service { let room_version_id = services() .rooms .state - .get_room_version(room_id) + .get_create_content::(room_id) .or_else(|_| { if event_type == TimelineEventType::RoomCreate { - let content = serde_json::from_str::(content.get()) + let content = + 
serde_json::from_str::( + content.get(), + ) .expect("Invalid content in RoomCreate pdu."); Ok(content.room_version) } else { Err(Error::InconsistentRoomState( "non-create event for room of unknown version", - room_id.to_owned(), + (**room_id).clone(), )) } })?; - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); + let Some(room_version_rules) = room_version_id.rules() else { + return Err(Error::UnsupportedRoomVersion(room_version_id)); + }; let auth_events = services().rooms.state.get_auth_events( room_id, + &room_version_rules.authorization, &event_type, sender, state_key.as_deref(), @@ -678,7 +786,7 @@ impl Service { // Our depth is the maximum depth of prev_events + 1 let depth = prev_events .iter() - .filter_map(|event_id| Some(services().rooms.timeline.get_pdu(event_id).ok()??.depth)) + .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) .max() .unwrap_or_else(|| uint!(0)) + uint!(1); @@ -686,25 +794,29 @@ impl Service { let mut unsigned = unsigned.unwrap_or_default(); if let Some(state_key) = &state_key { - if let Some(prev_pdu) = services().rooms.state_accessor.room_state_get( - room_id, - &event_type.to_string().into(), - state_key, - )? { + if let Some(prev_pdu) = + services().rooms.state_accessor.room_state_get( + room_id, + &event_type.to_string().into(), + state_key, + )? 
+ { unsigned.insert( "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), + serde_json::from_str(prev_pdu.content.get()) + .expect("string is valid json"), ); unsigned.insert( "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), + serde_json::to_value(&prev_pdu.sender) + .expect("UserId::to_value always works"), ); } } let mut pdu = PduEvent { event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), + room_id: (**room_id).clone(), sender: sender.to_owned(), origin_server_ts: utils::millis_since_unix_epoch() .try_into() @@ -722,7 +834,9 @@ impl Service { unsigned: if unsigned.is_empty() { None } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) + Some( + to_raw_value(&unsigned).expect("to_raw_value always works"), + ) }, hashes: EventHash { sha256: "aaa".to_owned(), @@ -730,27 +844,19 @@ impl Service { signatures: None, }; - let auth_check = state_res::auth_check( - &room_version, + state_res::auth_check( + &room_version_rules.authorization, &pdu, - None::, // TODO: third_party_invite |k, s| auth_events.get(&(k.clone(), s.to_owned())), ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") + .map_err(|error| { + error!(%error, "Auth check failed"); + Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed.") })?; - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); + let mut pdu_json = utils::to_canonical_object(&pdu) + .expect("event is valid, we just created it"); pdu_json.remove("event_id"); @@ -765,9 +871,9 @@ impl Service { services().globals.server_name().as_str(), services().globals.keypair(), &mut pdu_json, - &room_version_id, + &room_version_rules.redaction, ) { - Ok(_) => {} + 
Ok(()) => {} Err(e) => { return match e { ruma::signatures::Error::PduSize => Err(Error::BadRequest( @@ -785,7 +891,7 @@ impl Service { // Generate event id pdu.event_id = EventId::parse_arc(format!( "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) + ruma::signatures::reference_hash(&pdu_json, &room_version_rules) .expect("ruma can calculate reference hashes") )) .expect("ruma's reference hashes are valid event ids"); @@ -796,34 +902,33 @@ impl Service { ); // Generate short event id - let _shorteventid = services() - .rooms - .short - .get_or_create_shorteventid(&pdu.event_id)?; + let _shorteventid = + services().rooms.short.get_or_create_shorteventid(&pdu.event_id)?; Ok((pdu, pdu_json)) } - /// Creates a new persisted data unit and adds it to a room. This function takes a - /// roomid_mutex_state, meaning that only this function is able to mutate the room state. - #[tracing::instrument(skip(self, state_lock))] - pub async fn build_and_append_pdu( + /// Creates a new persisted data unit and adds it to a room. This function + /// takes a roomid_mutex_state, meaning that only this function is able + /// to mutate the room state. + #[allow(clippy::too_many_lines)] + #[tracing::instrument(skip(self))] + pub(crate) async fn build_and_append_pdu( &self, pdu_builder: PduBuilder, sender: &UserId, - room_id: &RoomId, - state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + room_id: &KeyToken, ) -> Result> { let (pdu, pdu_json) = - self.create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock)?; + self.create_hash_and_sign_event(pdu_builder, sender, room_id)?; if let Some(admin_room) = services().admin.get_admin_room()? 
{ - if admin_room == room_id { + if admin_room == **room_id { match pdu.event_type() { TimelineEventType::RoomEncryption => { warn!("Encryption is not allowed in the admins room"); return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Encryption is not allowed in the admins room.", )); } @@ -838,16 +943,32 @@ impl Service { .filter(|v| v.starts_with('@')) .unwrap_or(sender.as_str()); let server_name = services().globals.server_name(); - let server_user = format!("@conduit:{}", server_name); - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; + let server_user = format!( + "@{}:{server_name}", + if services().globals.config.conduit_compat { + "conduit" + } else { + "grapevine" + }, + ); + let content = + serde_json::from_str::( + pdu.content.get(), + ) + .map_err(|_| { + Error::bad_database("Invalid content in pdu.") + })?; if content.membership == MembershipState::Leave { if target == server_user { - warn!("Conduit user cannot leave from admins room"); + warn!( + "Grapevine user cannot leave from admins \ + room" + ); return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Conduit user cannot leave from admins room.", + ErrorKind::forbidden(), + "Grapevine user cannot leave from admins \ + room.", )); } @@ -855,25 +976,33 @@ impl Service { .rooms .state_cache .room_members(room_id) - .filter_map(|m| m.ok()) + .filter_map(Result::ok) .filter(|m| m.server_name() == server_name) .filter(|m| m != target) .count(); if count < 2 { - warn!("Last admin cannot leave from admins room"); + warn!( + "Last admin cannot leave from admins room" + ); return Err(Error::BadRequest( - ErrorKind::Forbidden, + ErrorKind::forbidden(), "Last admin cannot leave from admins room.", )); } } - if content.membership == MembershipState::Ban && pdu.state_key().is_some() { + if content.membership == MembershipState::Ban + && pdu.state_key().is_some() + { if target == server_user { - warn!("Conduit 
user cannot be banned in admins room"); + warn!( + "Grapevine user cannot be banned in \ + admins room" + ); return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Conduit user cannot be banned in admins room.", + ErrorKind::forbidden(), + "Grapevine user cannot be banned in \ + admins room.", )); } @@ -881,15 +1010,19 @@ impl Service { .rooms .state_cache .room_members(room_id) - .filter_map(|m| m.ok()) + .filter_map(Result::ok) .filter(|m| m.server_name() == server_name) .filter(|m| m != target) .count(); if count < 2 { - warn!("Last admin cannot be banned in admins room"); + warn!( + "Last admin cannot be banned in admins \ + room" + ); return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Last admin cannot be banned in admins room.", + ErrorKind::forbidden(), + "Last admin cannot be banned in admins \ + room.", )); } } @@ -899,93 +1032,47 @@ impl Service { } } - // If redaction event is not authorized, do not append it to the timeline - if pdu.kind == TimelineEventType::RoomRedaction { - match services().rooms.state.get_room_version(&pdu.room_id)? { - RoomVersionId::V1 - | RoomVersionId::V2 - | RoomVersionId::V3 - | RoomVersionId::V4 - | RoomVersionId::V5 - | RoomVersionId::V6 - | RoomVersionId::V7 - | RoomVersionId::V8 - | RoomVersionId::V9 - | RoomVersionId::V10 => { - if let Some(redact_id) = &pdu.redacts { - if !services().rooms.state_accessor.user_can_redact( - redact_id, - &pdu.sender, - &pdu.room_id, - false, - )? { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "User cannot redact this event.", - )); - } - }; - } - RoomVersionId::V11 => { - let content = - serde_json::from_str::(pdu.content.get()) - .map_err(|_| { - Error::bad_database("Invalid content in redaction pdu.") - })?; - - if let Some(redact_id) = &content.redacts { - if !services().rooms.state_accessor.user_can_redact( - redact_id, - &pdu.sender, - &pdu.room_id, - false, - )? 
{ - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "User cannot redact this event.", - )); - } - } - } - _ => { - return Err(Error::BadRequest( - ErrorKind::UnsupportedRoomVersion, - "Unsupported room version", - )); - } - } + // If redaction event is not authorized, do not append it to the + // timeline + if pdu.kind == TimelineEventType::RoomRedaction + && !services().rooms.state_accessor.redaction_event_allowed(&pdu)? + { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "User cannot redact this event.", + )); } - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. + // We append to state before appending the pdu, so we don't have a + // moment in time with the pdu without it's state. This is okay + // because append_pdu can't fail. let statehashid = services().rooms.state.append_to_state(&pdu)?; let pdu_id = self .append_pdu( &pdu, pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room + // Since this PDU references all pdu_leaves we can update the + // leaves of the room vec![(*pdu.event_id).to_owned()], - state_lock, + room_id, ) .await?; - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - services() - .rooms - .state - .set_room_state(room_id, statehashid, state_lock)?; + // We set the room state after inserting the pdu, so that we never have + // a moment in time where events in the current room state do + // not exist + services().rooms.state.set_room_state(room_id, statehashid)?; let mut servers: HashSet = services() .rooms .state_cache .room_servers(room_id) - .filter_map(|r| r.ok()) + .filter_map(Result::ok) .collect(); - // In case we are kicking or banning a user, we need to inform their server of the change + // In case we are kicking or banning a user, we need to inform their + // 
server of the change if pdu.kind == TimelineEventType::RoomMember { if let Some(state_key_uid) = &pdu .state_key @@ -996,7 +1083,8 @@ impl Service { } } - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above + // Remove our server from the server list since it will be added to it + // by room_servers() and/or the if statement above servers.remove(services().globals.server_name()); services().sending.send_pdu(servers.into_iter(), &pdu_id)?; @@ -1004,20 +1092,23 @@ impl Service { Ok(pdu.event_id) } - /// Append the incoming event setting the state snapshot to the state from the - /// server that sent the event. + /// Append the incoming event setting the state snapshot to the state from + /// the server that sent the event. #[tracing::instrument(skip_all)] - pub async fn append_incoming_pdu<'a>( + pub(crate) async fn append_incoming_pdu( &self, pdu: &PduEvent, pdu_json: CanonicalJsonObject, new_room_leaves: Vec, state_ids_compressed: Arc>, soft_fail: bool, - state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> Result>> { - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. + room_id: &KeyToken, + ) -> Result> { + assert_eq!(*pdu.room_id, **room_id, "Token for incorrect room passed"); + + // We append to state before appending the pdu, so we don't have a + // moment in time with the pdu without it's state. This is okay + // because append_pdu can't fail. 
services().rooms.state.set_event_state( &pdu.event_id, &pdu.room_id, @@ -1028,37 +1119,34 @@ impl Service { services() .rooms .pdu_metadata - .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - services().rooms.state.set_forward_extremities( - &pdu.room_id, - new_room_leaves, - state_lock, - )?; + .mark_as_referenced(room_id, &pdu.prev_events)?; + services() + .rooms + .state + .set_forward_extremities(room_id, new_room_leaves)?; return Ok(None); } - let pdu_id = services() - .rooms - .timeline - .append_pdu(pdu, pdu_json, new_room_leaves, state_lock) - .await?; + let pdu_id = + self.append_pdu(pdu, pdu_json, new_room_leaves, room_id).await?; Ok(Some(pdu_id)) } /// Returns an iterator over all PDUs in a room. - pub fn all_pdus<'a>( + pub(crate) fn all_pdus<'a>( &'a self, user_id: &UserId, room_id: &RoomId, ) -> Result> + 'a> { - self.pdus_after(user_id, room_id, PduCount::min()) + self.pdus_after(user_id, room_id, PduCount::MIN) } - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. + /// Returns an iterator over all events and their tokens in a room that + /// happened before the event with id `until` in reverse-chronological + /// order. #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( + pub(crate) fn pdus_until<'a>( &'a self, user_id: &UserId, room_id: &RoomId, @@ -1067,10 +1155,10 @@ impl Service { self.db.pdus_until(user_id, room_id, until) } - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. + /// Returns an iterator over all events and their token in a room that + /// happened after the event with id `from` in chronological order. #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( + pub(crate) fn pdus_after<'a>( &'a self, user_id: &UserId, room_id: &RoomId, @@ -1081,14 +1169,42 @@ impl Service { /// Replace a PDU with the redacted form. 
#[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { + pub(crate) fn redact_pdu( + &self, + event_id: &EventId, + reason: &PduEvent, + shortroomid: ShortRoomId, + ) -> Result<()> { // TODO: Don't reserialize, keep original json if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? - .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - let room_version_id = services().rooms.state.get_room_version(&pdu.room_id)?; - pdu.redact(room_version_id, reason)?; + #[derive(Deserialize)] + struct ExtractBody { + body: String, + } + + let mut pdu = self.get_pdu_from_id(&pdu_id)?.ok_or_else(|| { + Error::bad_database("PDU ID points to invalid PDU.") + })?; + + if let Ok(content) = + serde_json::from_str::(pdu.content.get()) + { + services().rooms.search.deindex_pdu( + shortroomid, + &pdu_id, + &content.body, + )?; + } + + let room_version_id = services() + .rooms + .state + .get_create_content::(&pdu.room_id)?; + let Some(room_version_rules) = room_version_id.rules() else { + return Err(Error::UnsupportedRoomVersion(room_version_id)); + }; + pdu.redact(&room_version_rules.redaction, reason)?; + self.replace_pdu( &pdu_id, &utils::to_canonical_object(&pdu).expect("PDU is an object"), @@ -1100,9 +1216,13 @@ impl Service { } #[tracing::instrument(skip(self, room_id))] - pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Result<()> { + pub(crate) async fn backfill_if_required( + &self, + room_id: &RoomId, + from: PduCount, + ) -> Result<()> { let first_pdu = self - .all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id)? + .all_pdus(user_id!("@doesntmatter:grapevine"), room_id)? .next() .expect("Room is not empty")?; @@ -1116,8 +1236,9 @@ impl Service { .state_accessor .room_state_get(room_id, &StateEventType::RoomPowerLevels, "")? 
.map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) + serde_json::from_str(ev.content.get()).map_err(|_| { + Error::bad_database("invalid m.room.power_levels event") + }) }) .transpose()? .unwrap_or_default(); @@ -1131,7 +1252,7 @@ impl Service { // Request backfill for backfill_server in admin_servers { - info!("Asking {backfill_server} for backfill"); + info!(server = %backfill_server, "Asking server for backfill"); let response = services() .sending .send_federation_request( @@ -1147,15 +1268,21 @@ impl Service { Ok(response) => { let pub_key_map = RwLock::new(BTreeMap::new()); for pdu in response.pdus { - if let Err(e) = self.backfill_pdu(backfill_server, pdu, &pub_key_map).await + if let Err(error) = self + .backfill_pdu(backfill_server, pdu, &pub_key_map) + .await { - warn!("Failed to add backfilled pdu: {e}"); + warn!(%error, "Failed to add backfilled pdu"); } } return Ok(()); } - Err(e) => { - warn!("{backfill_server} could not provide backfill: {e}"); + Err(error) => { + warn!( + server = %backfill_server, + %error, + "Server could not provide backfill", + ); } } } @@ -1165,36 +1292,39 @@ impl Service { } #[tracing::instrument(skip(self, pdu))] - pub async fn backfill_pdu( + pub(crate) async fn backfill_pdu( &self, origin: &ServerName, pdu: Box, - pub_key_map: &RwLock>>, + pub_key_map: &RwLock>, ) -> Result<()> { - let (event_id, value, room_id) = server_server::parse_incoming_pdu(&pdu)?; + let (event_id, value, room_id) = + server_server::parse_incoming_pdu(&pdu)?; // Lock so we cannot backfill the same pdu twice at the same time - let mutex = Arc::clone( - services() - .globals - .roomid_mutex_federation - .write() - .await - .entry(room_id.to_owned()) - .or_default(), - ); - let mutex_lock = mutex.lock().await; + let federation_token = services() + .globals + .roomid_mutex_federation + .lock_key(room_id.clone()) + .await; // Skip the PDU if we already have it as a timeline event - 
if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(&event_id)? { - info!("We already know {event_id} at {pdu_id:?}"); + if let Some(pdu_id) = self.get_pdu_id(&event_id)? { + info!(%event_id, ?pdu_id, "We already know this event"); return Ok(()); } services() .rooms .event_handler - .handle_incoming_pdu(origin, &event_id, &room_id, value, false, pub_key_map) + .handle_incoming_pdu( + origin, + &event_id, + &room_id, + value, + false, + pub_key_map, + ) .await?; let value = self.get_pdu_json(&event_id)?.expect("We just created it"); @@ -1206,26 +1336,22 @@ impl Service { .get_shortroomid(&room_id)? .expect("room exists"); - let mutex_insert = Arc::clone( - services() - .globals - .roomid_mutex_insert - .write() - .await - .entry(room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().await; + let insert_token = services() + .globals + .roomid_mutex_insert + .lock_key(room_id.clone()) + .await; let count = services().globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); + let mut pdu_id = shortroomid.get().to_be_bytes().to_vec(); pdu_id.extend_from_slice(&0_u64.to_be_bytes()); pdu_id.extend_from_slice(&(u64::MAX - count).to_be_bytes()); + let pdu_id = PduId::new(pdu_id); // Insert pdu self.db.prepend_backfill_pdu(&pdu_id, &event_id, &value)?; - drop(insert_lock); + drop(insert_token); if pdu.kind == TimelineEventType::RoomMessage { #[derive(Deserialize)] @@ -1233,23 +1359,43 @@ impl Service { body: Option, } - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; + let content = + serde_json::from_str::(pdu.content.get()) + .map_err(|_| { + Error::bad_database("Invalid content in pdu.") + })?; if let Some(body) = content.body { - services() - .rooms - .search - .index_pdu(shortroomid, &pdu_id, &body)?; + services().rooms.search.index_pdu( + shortroomid, + &pdu_id, + &body, + )?; } } - drop(mutex_lock); + drop(federation_token); info!("Prepended 
backfill pdu"); Ok(()) } } +#[derive(Deserialize)] +struct ExtractRelatesTo { + #[serde(rename = "m.relates_to")] + relates_to: Relation, +} + +#[derive(Clone, Debug, Deserialize)] +struct ExtractEventId { + event_id: OwnedEventId, +} +#[derive(Clone, Debug, Deserialize)] +struct ExtractRelatesToEventId { + #[serde(rename = "m.relates_to")] + relates_to: ExtractEventId, +} + #[cfg(test)] mod tests { use super::*; diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 6290b8cc..49b0c8a2 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -2,29 +2,35 @@ use std::sync::Arc; use ruma::{CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId}; -use crate::{PduEvent, Result}; - use super::PduCount; +use crate::{service::rooms::timeline::PduId, PduEvent, Result}; -pub trait Data: Send + Sync { - fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result; - +pub(crate) trait Data: Send + Sync { /// Returns the `count` of this pdu's id. fn get_pdu_count(&self, event_id: &EventId) -> Result>; /// Returns the json of a pdu. - fn get_pdu_json(&self, event_id: &EventId) -> Result>; + fn get_pdu_json( + &self, + event_id: &EventId, + ) -> Result>; /// Returns the json of a pdu. - fn get_non_outlier_pdu_json(&self, event_id: &EventId) -> Result>; + fn get_non_outlier_pdu_json( + &self, + event_id: &EventId, + ) -> Result>; /// Returns the pdu's id. - fn get_pdu_id(&self, event_id: &EventId) -> Result>>; + fn get_pdu_id(&self, event_id: &EventId) -> Result>; /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result>; + fn get_non_outlier_pdu( + &self, + event_id: &EventId, + ) -> Result>; /// Returns the pdu. /// @@ -34,24 +40,26 @@ pub trait Data: Send + Sync { /// Returns the pdu. /// /// This does __NOT__ check the outliers `Tree`. 
- fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result>; + fn get_pdu_from_id(&self, pdu_id: &PduId) -> Result>; /// Returns the pdu as a `BTreeMap`. - fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result>; + fn get_pdu_json_from_id( + &self, + pdu_id: &PduId, + ) -> Result>; /// Adds a new pdu to the timeline fn append_pdu( &self, - pdu_id: &[u8], + pdu_id: &PduId, pdu: &PduEvent, json: &CanonicalJsonObject, - count: u64, ) -> Result<()>; // Adds a new pdu to the backfilled timeline fn prepend_backfill_pdu( &self, - pdu_id: &[u8], + pdu_id: &PduId, event_id: &EventId, json: &CanonicalJsonObject, ) -> Result<()>; @@ -59,13 +67,13 @@ pub trait Data: Send + Sync { /// Removes a pdu and creates a new one with the same id. fn replace_pdu( &self, - pdu_id: &[u8], + pdu_id: &PduId, pdu_json: &CanonicalJsonObject, - pdu: &PduEvent, ) -> Result<()>; - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. + /// Returns an iterator over all events and their tokens in a room that + /// happened before the event with id `until` in reverse-chronological + /// order. #[allow(clippy::type_complexity)] fn pdus_until<'a>( &'a self, @@ -74,8 +82,8 @@ pub trait Data: Send + Sync { until: PduCount, ) -> Result> + 'a>>; - /// Returns an iterator over all events in a room that happened after the event with id `from` - /// in chronological order. + /// Returns an iterator over all events in a room that happened after the + /// event with id `from` in chronological order. 
#[allow(clippy::type_complexity)] fn pdus_after<'a>( &'a self, diff --git a/src/service/rooms/user.rs b/src/service/rooms/user.rs new file mode 100644 index 00000000..4f4b4344 --- /dev/null +++ b/src/service/rooms/user.rs @@ -0,0 +1,5 @@ +mod data; + +pub(crate) use data::Data; + +pub(crate) type Service = &'static dyn Data; diff --git a/src/service/rooms/user/data.rs b/src/service/rooms/user/data.rs index 4b8a4eca..d948e3e0 100644 --- a/src/service/rooms/user/data.rs +++ b/src/service/rooms/user/data.rs @@ -1,24 +1,45 @@ -use crate::Result; use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; -pub trait Data: Send + Sync { - fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; +use crate::{service::rooms::short::ShortStateHash, Result}; - fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result; +pub(crate) trait Data: Send + Sync { + fn reset_notification_counts( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result<()>; - fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result; + fn notification_count( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result; + + fn highlight_count( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result; // Returns the count at which the last reset_notification_counts was called - fn last_notification_read(&self, user_id: &UserId, room_id: &RoomId) -> Result; + fn last_notification_read( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result; fn associate_token_shortstatehash( &self, room_id: &RoomId, token: u64, - shortstatehash: u64, + shortstatehash: ShortStateHash, ) -> Result<()>; - fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result>; + fn get_token_shortstatehash( + &self, + room_id: &RoomId, + token: u64, + ) -> Result>; fn get_shared_rooms<'a>( &'a self, diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs deleted file mode 100644 index 672e502d..00000000 --- 
a/src/service/rooms/user/mod.rs +++ /dev/null @@ -1,49 +0,0 @@ -mod data; - -pub use data::Data; -use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; - -use crate::Result; - -pub struct Service { - pub db: &'static dyn Data, -} - -impl Service { - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - self.db.reset_notification_counts(user_id, room_id) - } - - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - self.db.notification_count(user_id, room_id) - } - - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - self.db.highlight_count(user_id, room_id) - } - - pub fn last_notification_read(&self, user_id: &UserId, room_id: &RoomId) -> Result { - self.db.last_notification_read(user_id, room_id) - } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - self.db - .associate_token_shortstatehash(room_id, token, shortstatehash) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - self.db.get_token_shortstatehash(room_id, token) - } - - pub fn get_shared_rooms( - &self, - users: Vec, - ) -> Result>> { - self.db.get_shared_rooms(users) - } -} diff --git a/src/service/sending.rs b/src/service/sending.rs new file mode 100644 index 00000000..c7723e45 --- /dev/null +++ b/src/service/sending.rs @@ -0,0 +1,988 @@ +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + fmt::Debug, + sync::Arc, + time::{Duration, Instant}, +}; + +use base64::{engine::general_purpose, Engine as _}; +use federation::transactions::send_transaction_message; +use futures_util::{stream::FuturesUnordered, StreamExt}; +use ruma::{ + api::{ + appservice::{self, Registration}, + federation::{ + self, + transactions::edu::{ + DeviceListUpdateContent, Edu, ReceiptContent, ReceiptData, + ReceiptMap, + }, + }, + OutgoingRequest, + }, + device_id, + events::{ + 
push_rules::PushRulesEventContent, receipt::ReceiptType, + AnySyncEphemeralRoomEvent, + }, + push, + serde::Raw, + uint, MilliSecondsSinceUnixEpoch, OwnedServerName, OwnedUserId, ServerName, + UInt, UserId, +}; +use tokio::{ + select, + sync::{mpsc, Mutex, Semaphore}, +}; +use tracing::{debug, error, warn, Span}; + +use super::rooms::timeline::PduId; +use crate::{ + api::{ + appservice_server, + server_server::{self, AllowLoopbackRequests, LogRequestError}, + }, + services, + utils::{calculate_hash, debug_slice_truncated}, + Config, Error, PduEvent, Result, +}; + +mod data; + +pub(crate) use data::Data; + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub(crate) enum Destination { + Appservice(String), + // user and pushkey + Push(OwnedUserId, String), + Normal(OwnedServerName), +} + +impl Destination { + #[tracing::instrument(skip(self))] + pub(crate) fn get_prefix(&self) -> Vec { + let mut prefix = match self { + Destination::Appservice(server) => { + let mut p = b"+".to_vec(); + p.extend_from_slice(server.as_bytes()); + p + } + Destination::Push(user, pushkey) => { + let mut p = b"$".to_vec(); + p.extend_from_slice(user.as_bytes()); + p.push(0xFF); + p.extend_from_slice(pushkey.as_bytes()); + p + } + Destination::Normal(server) => { + let mut p = Vec::new(); + p.extend_from_slice(server.as_bytes()); + p + } + }; + prefix.push(0xFF); + + prefix + } +} + +#[derive(Clone, Debug)] +pub(crate) enum SendingEventType { + // pduid + Pdu(PduId), + // pdu json + Edu(Raw), +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub(crate) struct RequestKey(Vec); + +impl RequestKey { + pub(crate) fn new(key: Vec) -> Self { + Self(key) + } + + pub(crate) fn as_bytes(&self) -> &[u8] { + &self.0 + } +} + +pub(crate) struct RequestData { + destination: Destination, + /// The PDU or reliable EDU and its associated key, or `None` if this is + /// only a trigger to collect and send EDUs to the destination (e.g. + /// read receipts). 
+ event: Option<(SendingEventType, RequestKey)>, + /// Span of the original `send_*()` method call + requester_span: Span, +} + +pub(crate) struct Service { + db: &'static dyn Data, + + /// The state for a given state hash. + pub(super) maximum_requests: Arc, + pub(crate) sender: mpsc::UnboundedSender, + receiver: Mutex>, +} + +#[derive(Debug)] +enum TransactionStatus { + Running, + // number of times failed, time of last failure + Failed(u32, Instant), + // number of times failed + Retrying(u32), +} + +struct HandlerInputs { + destination: Destination, + events: Vec, + /// Span of the original `send_*()` method call, if known (gets lost when + /// event is persisted to database) + requester_span: Option, +} + +#[derive(Debug)] +struct HandlerResponse { + destination: Destination, + result: Result<()>, + /// The span of the just-completed handler, for follows-from relationships. + handler_span: Span, +} + +type TransactionStatusMap = HashMap; + +enum SelectedEvents { + None, + Retries(Vec), + New(Vec), +} + +impl Service { + pub(crate) fn new(db: &'static dyn Data, config: &Config) -> Arc { + let (sender, receiver) = mpsc::unbounded_channel(); + Arc::new(Self { + db, + sender, + receiver: Mutex::new(receiver), + maximum_requests: Arc::new(Semaphore::new( + config.federation.max_concurrent_requests.into(), + )), + }) + } + + pub(crate) fn start_handler(self: &Arc) { + let self2 = Arc::clone(self); + tokio::spawn(async move { + self2.handler().await.unwrap(); + }); + } + + async fn handler(&self) -> Result<()> { + let mut receiver = self.receiver.lock().await; + + let mut futures = FuturesUnordered::new(); + + let mut current_transaction_status = TransactionStatusMap::new(); + + // Retry requests we could not finish yet + let mut initial_transactions = + HashMap::>::new(); + + for (key, destination, event) in + self.db.active_requests().filter_map(Result::ok) + { + let entry = + initial_transactions.entry(destination.clone()).or_default(); + + if entry.len() > 30 { + 
warn!( + ?key, + ?destination, + ?event, + "Dropping some current events", + ); + self.db.delete_active_request(key)?; + continue; + } + + entry.push(event); + } + + for (destination, events) in initial_transactions { + current_transaction_status + .insert(destination.clone(), TransactionStatus::Running); + futures.push(handle_events(HandlerInputs { + destination: destination.clone(), + events, + requester_span: None, + })); + } + + loop { + select! { + Some(response) = futures.next() => { + if let Some(inputs) = self.handle_response( + response, + &mut current_transaction_status, + )? { + futures.push(handle_events(inputs)); + } + } + Some(data) = receiver.recv() => { + if let Some(inputs) = self.handle_receiver( + data, &mut current_transaction_status + ) { + futures.push(handle_events(inputs)); + } + } + } + } + } + + #[tracing::instrument( + skip(self, result, handler_span, current_transaction_status), + fields( + current_status = ?current_transaction_status.get( + &destination + ), + error, + ), + )] + fn handle_response( + &self, + HandlerResponse { + destination, + result, + handler_span, + }: HandlerResponse, + current_transaction_status: &mut TransactionStatusMap, + ) -> Result> { + // clone() is required for the relationship to show up in jaeger + Span::current().follows_from(handler_span.clone()); + + if let Err(e) = &result { + Span::current().record("error", e.to_string()); + } + + if let Err(error) = result { + warn!(%error, "Marking transaction as failed"); + current_transaction_status.entry(destination).and_modify(|e| { + use TransactionStatus::{Failed, Retrying, Running}; + + *e = match e { + Running => Failed(1, Instant::now()), + Retrying(n) => Failed(*n + 1, Instant::now()), + Failed(..) 
=> { + error!("Request that was not even running failed?!"); + return; + } + } + }); + return Ok(None); + } + + self.db.delete_all_active_requests_for(&destination)?; + + // Find events that have been added since starting the + // last request + let new_events = self + .db + .queued_requests(&destination) + .filter_map(Result::ok) + .take(30) + .collect::>(); + + if new_events.is_empty() { + current_transaction_status.remove(&destination); + return Ok(None); + } + + // Insert pdus we found + self.db.mark_as_active(&new_events)?; + + Ok(Some(HandlerInputs { + destination: destination.clone(), + events: new_events.into_iter().map(|(event, _)| event).collect(), + requester_span: None, + })) + } + + #[tracing::instrument( + skip(self, event, requester_span, current_transaction_status), + fields( + current_status = ?current_transaction_status.get(&destination), + ), + )] + fn handle_receiver( + &self, + RequestData { + destination, + event, + requester_span, + }: RequestData, + current_transaction_status: &mut TransactionStatusMap, + ) -> Option { + // clone() is required for the relationship to show up in jaeger + Span::current().follows_from(requester_span.clone()); + + match self.select_events( + &destination, + event.into_iter().collect(), + current_transaction_status, + ) { + Ok(SelectedEvents::Retries(events)) => { + debug!("Retrying old events"); + Some(HandlerInputs { + destination, + events, + requester_span: None, + }) + } + Ok(SelectedEvents::New(events)) => { + debug!("Sending new event"); + Some(HandlerInputs { + destination, + events, + requester_span: Some(requester_span), + }) + } + Ok(SelectedEvents::None) => { + debug!("Holding off from sending any events"); + None + } + Err(error) => { + error!(%error, "Failed to select events to send"); + None + } + } + } + + #[tracing::instrument( + skip(self, new_events, current_transaction_status), + fields( + new_events = debug_slice_truncated(&new_events, 3), + current_status = 
?current_transaction_status.get(destination), + ), + )] + fn select_events( + &self, + destination: &Destination, + // Events we want to send: event and full key + new_events: Vec<(SendingEventType, RequestKey)>, + current_transaction_status: &mut HashMap< + Destination, + TransactionStatus, + >, + ) -> Result { + let mut retry = false; + let mut allow = true; + + let entry = current_transaction_status.entry(destination.clone()); + + entry + .and_modify(|e| match e { + TransactionStatus::Running | TransactionStatus::Retrying(_) => { + // already running + allow = false; + } + TransactionStatus::Failed(tries, time) => { + // Fail if a request has failed recently (exponential + // backoff) + let mut min_elapsed_duration = + Duration::from_secs(30) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) + { + min_elapsed_duration = + Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + allow = false; + } else { + retry = true; + *e = TransactionStatus::Retrying(*tries); + } + } + }) + .or_insert(TransactionStatus::Running); + + if !allow { + return Ok(SelectedEvents::None); + } + + if retry { + // We retry the previous transaction + let events = self + .db + .active_requests_for(destination) + .filter_map(Result::ok) + .map(|(_, e)| e) + .collect(); + + Ok(SelectedEvents::Retries(events)) + } else { + let mut events = Vec::new(); + + self.db.mark_as_active(&new_events)?; + for (e, _) in new_events { + events.push(e); + } + + if let Destination::Normal(server_name) = destination { + if let Ok((select_edus, last_count)) = + self.select_edus(server_name) + { + events.extend( + select_edus.into_iter().map(SendingEventType::Edu), + ); + + self.db.set_latest_educount(server_name, last_count)?; + } + } + + Ok(SelectedEvents::New(events)) + } + } + + #[tracing::instrument(skip(self))] + pub(crate) fn select_edus( + &self, + server_name: &ServerName, + ) -> Result<(Vec>, u64)> { + // u64: count of last edu + let 
since = self.db.get_latest_educount(server_name)?; + let mut events = Vec::new(); + let mut max_edu_count = since; + let mut device_list_changes = HashSet::new(); + + 'outer: for room_id in + services().rooms.state_cache.server_rooms(server_name) + { + let room_id = room_id?; + // Look for device list updates in this room + device_list_changes.extend( + services() + .users + .keys_changed(room_id.as_ref(), since, None) + .filter_map(Result::ok) + .filter(|user_id| { + user_id.server_name() + == services().globals.server_name() + }), + ); + + // Look for read receipts in this room + for r in services() + .rooms + .edus + .read_receipt + .readreceipts_since(&room_id, since) + { + let (user_id, count, read_receipt) = r?; + + if count > max_edu_count { + max_edu_count = count; + } + + if user_id.server_name() != services().globals.server_name() { + continue; + } + + let event: AnySyncEphemeralRoomEvent = serde_json::from_str( + read_receipt.json().get(), + ) + .map_err(|_| { + Error::bad_database("Invalid edu event in read_receipts.") + })?; + let federation_event = + if let AnySyncEphemeralRoomEvent::Receipt(r) = event { + let mut read = BTreeMap::new(); + + let (event_id, mut receipt) = + r.content.0.into_iter().next().expect( + "we only use one event per read receipt", + ); + let receipt = receipt + .remove(&ReceiptType::Read) + .expect("our read receipts always set this") + .remove(&user_id) + .expect( + "our read receipts always have the user here", + ); + + read.insert( + user_id, + ReceiptData { + data: receipt.clone(), + event_ids: vec![event_id.clone()], + }, + ); + + let receipt_map = ReceiptMap { + read, + }; + + let mut receipts = BTreeMap::new(); + receipts.insert(room_id.clone(), receipt_map); + + Edu::Receipt(ReceiptContent { + receipts, + }) + } else { + Error::bad_database( + "Invalid event type in read_receipts", + ); + continue; + }; + + events.push( + Raw::new(&federation_event) + .expect("json can be serialized"), + ); + + if events.len() >= 20 { + 
break 'outer; + } + } + } + + for user_id in device_list_changes { + // Empty prev id forces synapse to resync: https://github.com/matrix-org/synapse/blob/98aec1cc9da2bd6b8e34ffb282c85abf9b8b42ca/synapse/handlers/device.py#L767 + // Because synapse resyncs, we can just insert dummy data + let edu = Edu::DeviceListUpdate(DeviceListUpdateContent { + user_id, + device_id: device_id!("dummy").to_owned(), + device_display_name: Some("Dummy".to_owned()), + stream_id: uint!(1), + prev_id: Vec::new(), + deleted: None, + keys: None, + }); + + events.push(Raw::new(&edu).expect("json can be serialized")); + } + + Ok((events, max_edu_count)) + } + + #[tracing::instrument(skip(self, pdu_id, user, pushkey))] + pub(crate) fn send_push_pdu( + &self, + pdu_id: &PduId, + user: &UserId, + pushkey: String, + ) -> Result<()> { + let destination = Destination::Push(user.to_owned(), pushkey); + let event_type = SendingEventType::Pdu(pdu_id.to_owned()); + let key = self + .db + .queue_requests(&[(&destination, &event_type)])? 
+ .into_iter() + .next() + .unwrap(); + self.sender + .send(RequestData { + destination, + event: Some((event_type, key)), + requester_span: Span::current(), + }) + .unwrap(); + + Ok(()) + } + + #[tracing::instrument(skip(self, servers, pdu_id))] + pub(crate) fn send_pdu>( + &self, + servers: I, + pdu_id: &PduId, + ) -> Result<()> { + let requests = servers + .into_iter() + .map(|server| { + ( + Destination::Normal(server), + SendingEventType::Pdu(pdu_id.to_owned()), + ) + }) + .collect::>(); + let keys = self.db.queue_requests( + &requests.iter().map(|(o, e)| (o, e)).collect::>(), + )?; + for ((destination, event_type), key) in requests.into_iter().zip(keys) { + self.sender + .send(RequestData { + destination: destination.clone(), + event: Some((event_type, key)), + requester_span: Span::current(), + }) + .unwrap(); + } + + Ok(()) + } + + #[tracing::instrument(skip(self, server, serialized))] + pub(crate) fn send_reliable_edu( + &self, + server: &ServerName, + serialized: Raw, + id: u64, + ) -> Result<()> { + let destination = Destination::Normal(server.to_owned()); + let event_type = SendingEventType::Edu(serialized); + let key = self + .db + .queue_requests(&[(&destination, &event_type)])? 
+ .into_iter() + .next() + .unwrap(); + self.sender + .send(RequestData { + destination, + event: Some((event_type, key)), + requester_span: Span::current(), + }) + .unwrap(); + + Ok(()) + } + + #[tracing::instrument(skip(self))] + pub(crate) fn trigger_edu_send(&self, server: &ServerName) -> Result<()> { + if server == services().globals.server_name() { + debug!("Ignoring EDU send request to ourselves"); + return Ok(()); + } + let destination = Destination::Normal(server.to_owned()); + self.sender + .send(RequestData { + destination, + event: None, + requester_span: Span::current(), + }) + .unwrap(); + + Ok(()) + } + + #[tracing::instrument(skip(self))] + pub(crate) fn send_pdu_appservice( + &self, + appservice_id: String, + pdu_id: PduId, + ) -> Result<()> { + let destination = Destination::Appservice(appservice_id); + let event_type = SendingEventType::Pdu(pdu_id); + let key = self + .db + .queue_requests(&[(&destination, &event_type)])? + .into_iter() + .next() + .unwrap(); + self.sender + .send(RequestData { + destination, + event: Some((event_type, key)), + requester_span: Span::current(), + }) + .unwrap(); + + Ok(()) + } + + #[tracing::instrument(skip(self, request))] + pub(crate) async fn send_federation_request( + &self, + destination: &ServerName, + request: T, + ) -> Result + where + T: OutgoingRequest + Debug, + { + debug!("Waiting for permit"); + let permit = self.maximum_requests.acquire().await; + debug!("Got permit"); + let response = tokio::time::timeout( + Duration::from_secs(2 * 60), + server_server::send_request( + destination, + request, + LogRequestError::Yes, + AllowLoopbackRequests::No, + ), + ) + .await + .map_err(|_| { + warn!("Timeout waiting for server response"); + Error::BadServerResponse("Timeout waiting for server response") + })?; + drop(permit); + + response + } + + /// Sends a request to an appservice + /// + /// Only returns None if there is no url specified in the appservice + /// registration file + #[tracing::instrument( + 
skip(self, registration, request), + fields(appservice_id = registration.id), + )] + pub(crate) async fn send_appservice_request( + &self, + registration: Registration, + request: T, + ) -> Result> + where + T: OutgoingRequest + Debug, + { + let permit = self.maximum_requests.acquire().await; + let response = + appservice_server::send_request(registration, request).await; + drop(permit); + + response + } +} + +#[tracing::instrument(skip(events))] +async fn handle_appservice_event( + id: &str, + events: Vec, +) -> Result<()> { + let mut pdu_jsons = Vec::new(); + + for event in &events { + match event { + SendingEventType::Pdu(pdu_id) => { + pdu_jsons.push( + services() + .rooms + .timeline + .get_pdu_from_id(pdu_id)? + .ok_or_else(|| { + Error::bad_database( + "[Appservice] Event in servernameevent_data \ + not found in db.", + ) + })? + .to_room_event(), + ); + } + SendingEventType::Edu(_) => { + // TODO: send EDUs in + // `appservice::event::push_events::v1::Request::ephemeral` if + // enabled in registration + } + } + } + + let permit = services().sending.maximum_requests.acquire().await; + + appservice_server::send_request( + services().appservice.get_registration(id).await.ok_or_else(|| { + Error::bad_database( + "[Appservice] Could not load registration from db.", + ) + })?, + appservice::event::push_events::v1::Request { + events: pdu_jsons, + txn_id: general_purpose::URL_SAFE_NO_PAD + .encode(calculate_hash(events.iter().map(|e| match e { + SendingEventType::Edu(b) => b.json().get().as_bytes(), + SendingEventType::Pdu(b) => b.as_bytes(), + }))) + .into(), + ephemeral: Vec::new(), + }, + ) + .await?; + + drop(permit); + + Ok(()) +} + +#[tracing::instrument(skip(events))] +async fn handle_push_event( + userid: &UserId, + pushkey: &str, + events: Vec, +) -> Result<()> { + let mut pdus = Vec::new(); + + for event in &events { + match event { + SendingEventType::Pdu(pdu_id) => { + pdus.push( + services() + .rooms + .timeline + .get_pdu_from_id(pdu_id)? 
+ .ok_or_else(|| { + Error::bad_database( + "[Push] Event in servernamevent_datas not \ + found in db.", + ) + })?, + ); + } + // Push gateways don't need EDUs (?) + SendingEventType::Edu(_) => {} + } + } + + for pdu in pdus { + // Redacted events are not notification targets (we don't + // send push for them) + if let Some(unsigned) = &pdu.unsigned { + if let Ok(unsigned) = + serde_json::from_str::(unsigned.get()) + { + if unsigned.get("redacted_because").is_some() { + continue; + } + } + } + + let Some(pusher) = services().pusher.get_pusher(userid, pushkey)? + else { + continue; + }; + + let rules_for_user = services() + .account_data + .get_global::(userid) + .unwrap_or_default() + .and_then(|event| event.deserialize().ok()) + .map_or_else( + || push::Ruleset::server_default(userid), + |ev| ev.global, + ); + + let unread: UInt = services() + .rooms + .user + .notification_count(userid, &pdu.room_id)? + .try_into() + .expect("notification count can't go that high"); + + let permit = services().sending.maximum_requests.acquire().await; + + services() + .pusher + .send_push_notice(userid, unread, &pusher, rules_for_user, &pdu) + .await?; + + drop(permit); + } + + Ok(()) +} + +#[tracing::instrument(skip(events))] +async fn handle_federation_event( + server: &ServerName, + events: Vec, +) -> Result<()> { + let mut edu_jsons = Vec::new(); + let mut pdu_jsons = Vec::new(); + + if server == services().globals.server_name() { + warn!("Dropping outbound federation request to ourselves"); + return Ok(()); + } + + for event in &events { + match event { + SendingEventType::Pdu(pdu_id) => { + // TODO: check room version and remove event_id if + // needed + pdu_jsons.push(PduEvent::convert_to_outgoing_federation_event( + services() + .rooms + .timeline + .get_pdu_json_from_id(pdu_id)? 
+ .ok_or_else(|| { + error!(pdu_id = ?pdu_id, "PDU not found"); + Error::bad_database( + "[Normal] Event in servernamevent_datas not \ + found in db.", + ) + })?, + )); + } + SendingEventType::Edu(edu) => { + edu_jsons.push(edu.clone()); + } + } + } + + let permit = services().sending.maximum_requests.acquire().await; + + let response = server_server::send_request( + server, + send_transaction_message::v1::Request { + origin: services().globals.server_name().to_owned(), + pdus: pdu_jsons, + edus: edu_jsons, + origin_server_ts: MilliSecondsSinceUnixEpoch::now(), + transaction_id: general_purpose::URL_SAFE_NO_PAD + .encode(calculate_hash(events.iter().map(|e| match e { + SendingEventType::Edu(b) => b.json().get().as_bytes(), + SendingEventType::Pdu(b) => b.as_bytes(), + }))) + .into(), + }, + LogRequestError::No, + AllowLoopbackRequests::No, + ) + .await?; + + for pdu in response.pdus { + if let (event_id, Err(error)) = pdu { + warn!(%server, %event_id, %error, "Failed to send event to server"); + } + } + + drop(permit); + + Ok(()) +} + +#[tracing::instrument(skip_all)] +async fn handle_events( + HandlerInputs { + destination, + events, + requester_span, + }: HandlerInputs, +) -> HandlerResponse { + if let Some(span) = requester_span { + // clone() is required for the relationship to show up in jaeger + Span::current().follows_from(span.clone()); + } + + let result = match &destination { + Destination::Appservice(id) => { + handle_appservice_event(id, events).await + } + Destination::Push(userid, pushkey) => { + handle_push_event(userid, pushkey, events).await + } + Destination::Normal(server) => { + handle_federation_event(server, events).await + } + }; + + HandlerResponse { + destination, + result, + handler_span: Span::current(), + } +} diff --git a/src/service/sending/data.rs b/src/service/sending/data.rs index 8b4d236f..39ea51d0 100644 --- a/src/service/sending/data.rs +++ b/src/service/sending/data.rs @@ -1,30 +1,41 @@ use ruma::ServerName; +use 
super::{Destination, RequestKey, SendingEventType}; use crate::Result; -use super::{OutgoingKind, SendingEventType}; - -pub trait Data: Send + Sync { +pub(crate) trait Data: Send + Sync { #[allow(clippy::type_complexity)] fn active_requests<'a>( &'a self, - ) -> Box, OutgoingKind, SendingEventType)>> + 'a>; + ) -> Box< + dyn Iterator> + + 'a, + >; fn active_requests_for<'a>( &'a self, - outgoing_kind: &OutgoingKind, - ) -> Box, SendingEventType)>> + 'a>; - fn delete_active_request(&self, key: Vec) -> Result<()>; - fn delete_all_active_requests_for(&self, outgoing_kind: &OutgoingKind) -> Result<()>; - fn delete_all_requests_for(&self, outgoing_kind: &OutgoingKind) -> Result<()>; + destination: &Destination, + ) -> Box> + 'a>; + fn delete_active_request(&self, key: RequestKey) -> Result<()>; + fn delete_all_active_requests_for( + &self, + destination: &Destination, + ) -> Result<()>; fn queue_requests( &self, - requests: &[(&OutgoingKind, SendingEventType)], - ) -> Result>>; + requests: &[(&Destination, &SendingEventType)], + ) -> Result>; fn queued_requests<'a>( &'a self, - outgoing_kind: &OutgoingKind, - ) -> Box)>> + 'a>; - fn mark_as_active(&self, events: &[(SendingEventType, Vec)]) -> Result<()>; - fn set_latest_educount(&self, server_name: &ServerName, educount: u64) -> Result<()>; + destination: &Destination, + ) -> Box> + 'a>; + fn mark_as_active( + &self, + events: &[(SendingEventType, RequestKey)], + ) -> Result<()>; + fn set_latest_educount( + &self, + server_name: &ServerName, + educount: u64, + ) -> Result<()>; fn get_latest_educount(&self, server_name: &ServerName) -> Result; } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs deleted file mode 100644 index 7e54e8b4..00000000 --- a/src/service/sending/mod.rs +++ /dev/null @@ -1,721 +0,0 @@ -mod data; - -pub use data::Data; - -use std::{ - collections::{BTreeMap, HashMap, HashSet}, - fmt::Debug, - sync::Arc, - time::{Duration, Instant}, -}; - -use crate::{ - api::{appservice_server, 
server_server}, - services, - utils::calculate_hash, - Config, Error, PduEvent, Result, -}; -use federation::transactions::send_transaction_message; -use futures_util::{stream::FuturesUnordered, StreamExt}; - -use base64::{engine::general_purpose, Engine as _}; - -use ruma::{ - api::{ - appservice::{self, Registration}, - federation::{ - self, - transactions::edu::{ - DeviceListUpdateContent, Edu, ReceiptContent, ReceiptData, ReceiptMap, - }, - }, - OutgoingRequest, - }, - device_id, - events::{ - push_rules::PushRulesEvent, receipt::ReceiptType, AnySyncEphemeralRoomEvent, - GlobalAccountDataEventType, - }, - push, uint, MilliSecondsSinceUnixEpoch, OwnedServerName, OwnedUserId, ServerName, UInt, UserId, -}; -use tokio::{ - select, - sync::{mpsc, Mutex, Semaphore}, -}; -use tracing::{debug, error, warn}; - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub enum OutgoingKind { - Appservice(String), - Push(OwnedUserId, String), // user and pushkey - Normal(OwnedServerName), -} - -impl OutgoingKind { - #[tracing::instrument(skip(self))] - pub fn get_prefix(&self) -> Vec { - let mut prefix = match self { - OutgoingKind::Appservice(server) => { - let mut p = b"+".to_vec(); - p.extend_from_slice(server.as_bytes()); - p - } - OutgoingKind::Push(user, pushkey) => { - let mut p = b"$".to_vec(); - p.extend_from_slice(user.as_bytes()); - p.push(0xff); - p.extend_from_slice(pushkey.as_bytes()); - p - } - OutgoingKind::Normal(server) => { - let mut p = Vec::new(); - p.extend_from_slice(server.as_bytes()); - p - } - }; - prefix.push(0xff); - - prefix - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub enum SendingEventType { - Pdu(Vec), // pduid - Edu(Vec), // pdu json -} - -pub struct Service { - db: &'static dyn Data, - - /// The state for a given state hash. 
- pub(super) maximum_requests: Arc, - pub sender: mpsc::UnboundedSender<(OutgoingKind, SendingEventType, Vec)>, - receiver: Mutex)>>, -} - -enum TransactionStatus { - Running, - Failed(u32, Instant), // number of times failed, time of last failure - Retrying(u32), // number of times failed -} - -impl Service { - pub fn build(db: &'static dyn Data, config: &Config) -> Arc { - let (sender, receiver) = mpsc::unbounded_channel(); - Arc::new(Self { - db, - sender, - receiver: Mutex::new(receiver), - maximum_requests: Arc::new(Semaphore::new(config.max_concurrent_requests as usize)), - }) - } - - pub fn start_handler(self: &Arc) { - let self2 = Arc::clone(self); - tokio::spawn(async move { - self2.handler().await.unwrap(); - }); - } - - async fn handler(&self) -> Result<()> { - let mut receiver = self.receiver.lock().await; - - let mut futures = FuturesUnordered::new(); - - let mut current_transaction_status = HashMap::::new(); - - // Retry requests we could not finish yet - let mut initial_transactions = HashMap::>::new(); - - for (key, outgoing_kind, event) in self.db.active_requests().filter_map(|r| r.ok()) { - let entry = initial_transactions - .entry(outgoing_kind.clone()) - .or_default(); - - if entry.len() > 30 { - warn!( - "Dropping some current events: {:?} {:?} {:?}", - key, outgoing_kind, event - ); - self.db.delete_active_request(key)?; - continue; - } - - entry.push(event); - } - - for (outgoing_kind, events) in initial_transactions { - current_transaction_status.insert(outgoing_kind.clone(), TransactionStatus::Running); - futures.push(Self::handle_events(outgoing_kind.clone(), events)); - } - - loop { - select! 
{ - Some(response) = futures.next() => { - match response { - Ok(outgoing_kind) => { - self.db.delete_all_active_requests_for(&outgoing_kind)?; - - // Find events that have been added since starting the last request - let new_events = self.db.queued_requests(&outgoing_kind).filter_map(|r| r.ok()).take(30).collect::>(); - - if !new_events.is_empty() { - // Insert pdus we found - self.db.mark_as_active(&new_events)?; - - futures.push( - Self::handle_events( - outgoing_kind.clone(), - new_events.into_iter().map(|(event, _)| event).collect(), - ) - ); - } else { - current_transaction_status.remove(&outgoing_kind); - } - } - Err((outgoing_kind, _)) => { - current_transaction_status.entry(outgoing_kind).and_modify(|e| *e = match e { - TransactionStatus::Running => TransactionStatus::Failed(1, Instant::now()), - TransactionStatus::Retrying(n) => TransactionStatus::Failed(*n+1, Instant::now()), - TransactionStatus::Failed(_, _) => { - error!("Request that was not even running failed?!"); - return - }, - }); - } - }; - }, - Some((outgoing_kind, event, key)) = receiver.recv() => { - if let Ok(Some(events)) = self.select_events( - &outgoing_kind, - vec![(event, key)], - &mut current_transaction_status, - ) { - futures.push(Self::handle_events(outgoing_kind, events)); - } - } - } - } - } - - #[tracing::instrument(skip(self, outgoing_kind, new_events, current_transaction_status))] - fn select_events( - &self, - outgoing_kind: &OutgoingKind, - new_events: Vec<(SendingEventType, Vec)>, // Events we want to send: event and full key - current_transaction_status: &mut HashMap, - ) -> Result>> { - let mut retry = false; - let mut allow = true; - - let entry = current_transaction_status.entry(outgoing_kind.clone()); - - entry - .and_modify(|e| match e { - TransactionStatus::Running | TransactionStatus::Retrying(_) => { - allow = false; // already running - } - TransactionStatus::Failed(tries, time) => { - // Fail if a request has failed recently (exponential backoff) - let mut 
min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } - - if time.elapsed() < min_elapsed_duration { - allow = false; - } else { - retry = true; - *e = TransactionStatus::Retrying(*tries); - } - } - }) - .or_insert(TransactionStatus::Running); - - if !allow { - return Ok(None); - } - - let mut events = Vec::new(); - - if retry { - // We retry the previous transaction - for (_, e) in self - .db - .active_requests_for(outgoing_kind) - .filter_map(|r| r.ok()) - { - events.push(e); - } - } else { - self.db.mark_as_active(&new_events)?; - for (e, _) in new_events { - events.push(e); - } - - if let OutgoingKind::Normal(server_name) = outgoing_kind { - if let Ok((select_edus, last_count)) = self.select_edus(server_name) { - events.extend(select_edus.into_iter().map(SendingEventType::Edu)); - - self.db.set_latest_educount(server_name, last_count)?; - } - } - } - - Ok(Some(events)) - } - - #[tracing::instrument(skip(self, server_name))] - pub fn select_edus(&self, server_name: &ServerName) -> Result<(Vec>, u64)> { - // u64: count of last edu - let since = self.db.get_latest_educount(server_name)?; - let mut events = Vec::new(); - let mut max_edu_count = since; - let mut device_list_changes = HashSet::new(); - - 'outer: for room_id in services().rooms.state_cache.server_rooms(server_name) { - let room_id = room_id?; - // Look for device list updates in this room - device_list_changes.extend( - services() - .users - .keys_changed(room_id.as_ref(), since, None) - .filter_map(|r| r.ok()) - .filter(|user_id| user_id.server_name() == services().globals.server_name()), - ); - - // Look for read receipts in this room - for r in services() - .rooms - .edus - .read_receipt - .readreceipts_since(&room_id, since) - { - let (user_id, count, read_receipt) = r?; - - if count > max_edu_count { - max_edu_count = count; - } - - if 
user_id.server_name() != services().globals.server_name() { - continue; - } - - let event: AnySyncEphemeralRoomEvent = - serde_json::from_str(read_receipt.json().get()) - .map_err(|_| Error::bad_database("Invalid edu event in read_receipts."))?; - let federation_event = match event { - AnySyncEphemeralRoomEvent::Receipt(r) => { - let mut read = BTreeMap::new(); - - let (event_id, mut receipt) = r - .content - .0 - .into_iter() - .next() - .expect("we only use one event per read receipt"); - let receipt = receipt - .remove(&ReceiptType::Read) - .expect("our read receipts always set this") - .remove(&user_id) - .expect("our read receipts always have the user here"); - - read.insert( - user_id, - ReceiptData { - data: receipt.clone(), - event_ids: vec![event_id.clone()], - }, - ); - - let receipt_map = ReceiptMap { read }; - - let mut receipts = BTreeMap::new(); - receipts.insert(room_id.clone(), receipt_map); - - Edu::Receipt(ReceiptContent { receipts }) - } - _ => { - Error::bad_database("Invalid event type in read_receipts"); - continue; - } - }; - - events.push(serde_json::to_vec(&federation_event).expect("json can be serialized")); - - if events.len() >= 20 { - break 'outer; - } - } - } - - for user_id in device_list_changes { - // Empty prev id forces synapse to resync: https://github.com/matrix-org/synapse/blob/98aec1cc9da2bd6b8e34ffb282c85abf9b8b42ca/synapse/handlers/device.py#L767 - // Because synapse resyncs, we can just insert dummy data - let edu = Edu::DeviceListUpdate(DeviceListUpdateContent { - user_id, - device_id: device_id!("dummy").to_owned(), - device_display_name: Some("Dummy".to_owned()), - stream_id: uint!(1), - prev_id: Vec::new(), - deleted: None, - keys: None, - }); - - events.push(serde_json::to_vec(&edu).expect("json can be serialized")); - } - - Ok((events, max_edu_count)) - } - - #[tracing::instrument(skip(self, pdu_id, user, pushkey))] - pub fn send_push_pdu(&self, pdu_id: &[u8], user: &UserId, pushkey: String) -> Result<()> { - let 
outgoing_kind = OutgoingKind::Push(user.to_owned(), pushkey); - let event = SendingEventType::Pdu(pdu_id.to_owned()); - let keys = self.db.queue_requests(&[(&outgoing_kind, event.clone())])?; - self.sender - .send((outgoing_kind, event, keys.into_iter().next().unwrap())) - .unwrap(); - - Ok(()) - } - - #[tracing::instrument(skip(self, servers, pdu_id))] - pub fn send_pdu>( - &self, - servers: I, - pdu_id: &[u8], - ) -> Result<()> { - let requests = servers - .into_iter() - .map(|server| { - ( - OutgoingKind::Normal(server), - SendingEventType::Pdu(pdu_id.to_owned()), - ) - }) - .collect::>(); - let keys = self.db.queue_requests( - &requests - .iter() - .map(|(o, e)| (o, e.clone())) - .collect::>(), - )?; - for ((outgoing_kind, event), key) in requests.into_iter().zip(keys) { - self.sender - .send((outgoing_kind.to_owned(), event, key)) - .unwrap(); - } - - Ok(()) - } - - #[tracing::instrument(skip(self, server, serialized))] - pub fn send_reliable_edu( - &self, - server: &ServerName, - serialized: Vec, - id: u64, - ) -> Result<()> { - let outgoing_kind = OutgoingKind::Normal(server.to_owned()); - let event = SendingEventType::Edu(serialized); - let keys = self.db.queue_requests(&[(&outgoing_kind, event.clone())])?; - self.sender - .send((outgoing_kind, event, keys.into_iter().next().unwrap())) - .unwrap(); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn send_pdu_appservice(&self, appservice_id: String, pdu_id: Vec) -> Result<()> { - let outgoing_kind = OutgoingKind::Appservice(appservice_id); - let event = SendingEventType::Pdu(pdu_id); - let keys = self.db.queue_requests(&[(&outgoing_kind, event.clone())])?; - self.sender - .send((outgoing_kind, event, keys.into_iter().next().unwrap())) - .unwrap(); - - Ok(()) - } - - /// Cleanup event data - /// Used for instance after we remove an appservice registration - /// - #[tracing::instrument(skip(self))] - pub fn cleanup_events(&self, appservice_id: String) -> Result<()> { - self.db - 
.delete_all_requests_for(&OutgoingKind::Appservice(appservice_id))?; - - Ok(()) - } - - #[tracing::instrument(skip(events, kind))] - async fn handle_events( - kind: OutgoingKind, - events: Vec, - ) -> Result { - match &kind { - OutgoingKind::Appservice(id) => { - let mut pdu_jsons = Vec::new(); - - for event in &events { - match event { - SendingEventType::Pdu(pdu_id) => { - pdu_jsons.push(services().rooms.timeline - .get_pdu_from_id(pdu_id) - .map_err(|e| (kind.clone(), e))? - .ok_or_else(|| { - ( - kind.clone(), - Error::bad_database( - "[Appservice] Event in servernameevent_data not found in db.", - ), - ) - })? - .to_room_event()) - } - SendingEventType::Edu(_) => { - // Appservices don't need EDUs (?) - } - } - } - - let permit = services().sending.maximum_requests.acquire().await; - - let response = match appservice_server::send_request( - services() - .appservice - .get_registration(id) - .await - .ok_or_else(|| { - ( - kind.clone(), - Error::bad_database( - "[Appservice] Could not load registration from db.", - ), - ) - })?, - appservice::event::push_events::v1::Request { - events: pdu_jsons, - txn_id: (&*general_purpose::URL_SAFE_NO_PAD.encode(calculate_hash( - &events - .iter() - .map(|e| match e { - SendingEventType::Edu(b) | SendingEventType::Pdu(b) => &**b, - }) - .collect::>(), - ))) - .into(), - }, - ) - .await - { - Ok(_) => Ok(kind.clone()), - Err(e) => Err((kind.clone(), e)), - }; - - drop(permit); - - response - } - OutgoingKind::Push(userid, pushkey) => { - let mut pdus = Vec::new(); - - for event in &events { - match event { - SendingEventType::Pdu(pdu_id) => { - pdus.push( - services().rooms - .timeline - .get_pdu_from_id(pdu_id) - .map_err(|e| (kind.clone(), e))? - .ok_or_else(|| { - ( - kind.clone(), - Error::bad_database( - "[Push] Event in servernamevent_datas not found in db.", - ), - ) - })?, - ); - } - SendingEventType::Edu(_) => { - // Push gateways don't need EDUs (?) 
- } - } - } - - for pdu in pdus { - // Redacted events are not notification targets (we don't send push for them) - if let Some(unsigned) = &pdu.unsigned { - if let Ok(unsigned) = - serde_json::from_str::(unsigned.get()) - { - if unsigned.get("redacted_because").is_some() { - continue; - } - } - } - - let pusher = match services() - .pusher - .get_pusher(userid, pushkey) - .map_err(|e| (OutgoingKind::Push(userid.clone(), pushkey.clone()), e))? - { - Some(pusher) => pusher, - None => continue, - }; - - let rules_for_user = services() - .account_data - .get( - None, - userid, - GlobalAccountDataEventType::PushRules.to_string().into(), - ) - .unwrap_or_default() - .and_then(|event| serde_json::from_str::(event.get()).ok()) - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| push::Ruleset::server_default(userid)); - - let unread: UInt = services() - .rooms - .user - .notification_count(userid, &pdu.room_id) - .map_err(|e| (kind.clone(), e))? - .try_into() - .expect("notification count can't go that high"); - - let permit = services().sending.maximum_requests.acquire().await; - - let _response = services() - .pusher - .send_push_notice(userid, unread, &pusher, rules_for_user, &pdu) - .await - .map(|_response| kind.clone()) - .map_err(|e| (kind.clone(), e)); - - drop(permit); - } - Ok(OutgoingKind::Push(userid.clone(), pushkey.clone())) - } - OutgoingKind::Normal(server) => { - let mut edu_jsons = Vec::new(); - let mut pdu_jsons = Vec::new(); - - for event in &events { - match event { - SendingEventType::Pdu(pdu_id) => { - // TODO: check room version and remove event_id if needed - let raw = PduEvent::convert_to_outgoing_federation_event( - services().rooms - .timeline - .get_pdu_json_from_id(pdu_id) - .map_err(|e| (OutgoingKind::Normal(server.clone()), e))? 
- .ok_or_else(|| { - error!("event not found: {server} {pdu_id:?}"); - ( - OutgoingKind::Normal(server.clone()), - Error::bad_database( - "[Normal] Event in servernamevent_datas not found in db.", - ), - ) - })?, - ); - pdu_jsons.push(raw); - } - SendingEventType::Edu(edu) => { - if let Ok(raw) = serde_json::from_slice(edu) { - edu_jsons.push(raw); - } - } - } - } - - let permit = services().sending.maximum_requests.acquire().await; - - let response = server_server::send_request( - server, - send_transaction_message::v1::Request { - origin: services().globals.server_name().to_owned(), - pdus: pdu_jsons, - edus: edu_jsons, - origin_server_ts: MilliSecondsSinceUnixEpoch::now(), - transaction_id: (&*general_purpose::URL_SAFE_NO_PAD.encode( - calculate_hash( - &events - .iter() - .map(|e| match e { - SendingEventType::Edu(b) | SendingEventType::Pdu(b) => &**b, - }) - .collect::>(), - ), - )) - .into(), - }, - ) - .await - .map(|response| { - for pdu in response.pdus { - if pdu.1.is_err() { - warn!("Failed to send to {}: {:?}", server, pdu); - } - } - kind.clone() - }) - .map_err(|e| (kind, e)); - - drop(permit); - - response - } - } - } - - #[tracing::instrument(skip(self, destination, request))] - pub async fn send_federation_request( - &self, - destination: &ServerName, - request: T, - ) -> Result - where - T: Debug, - { - debug!("Waiting for permit"); - let permit = self.maximum_requests.acquire().await; - debug!("Got permit"); - let response = tokio::time::timeout( - Duration::from_secs(2 * 60), - server_server::send_request(destination, request), - ) - .await - .map_err(|_| { - warn!("Timeout waiting for server response of {destination}"); - Error::BadServerResponse("Timeout waiting for server response") - })?; - drop(permit); - - response - } - - /// Sends a request to an appservice - /// - /// Only returns None if there is no url specified in the appservice registration file - #[tracing::instrument(skip(self, registration, request))] - pub async fn 
send_appservice_request( - &self, - registration: Registration, - request: T, - ) -> Result> - where - T: Debug, - { - let permit = self.maximum_requests.acquire().await; - let response = appservice_server::send_request(registration, request).await; - drop(permit); - - response - } -} diff --git a/src/service/transaction_ids.rs b/src/service/transaction_ids.rs new file mode 100644 index 00000000..4f4b4344 --- /dev/null +++ b/src/service/transaction_ids.rs @@ -0,0 +1,5 @@ +mod data; + +pub(crate) use data::Data; + +pub(crate) type Service = &'static dyn Data; diff --git a/src/service/transaction_ids/data.rs b/src/service/transaction_ids/data.rs index 74855318..e7012956 100644 --- a/src/service/transaction_ids/data.rs +++ b/src/service/transaction_ids/data.rs @@ -1,7 +1,8 @@ -use crate::Result; use ruma::{DeviceId, TransactionId, UserId}; -pub trait Data: Send + Sync { +use crate::Result; + +pub(crate) trait Data: Send + Sync { fn add_txnid( &self, user_id: &UserId, diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs deleted file mode 100644 index 2fa3b02e..00000000 --- a/src/service/transaction_ids/mod.rs +++ /dev/null @@ -1,31 +0,0 @@ -mod data; - -pub use data::Data; - -use crate::Result; -use ruma::{DeviceId, TransactionId, UserId}; - -pub struct Service { - pub db: &'static dyn Data, -} - -impl Service { - pub fn add_txnid( - &self, - user_id: &UserId, - device_id: Option<&DeviceId>, - txn_id: &TransactionId, - data: &[u8], - ) -> Result<()> { - self.db.add_txnid(user_id, device_id, txn_id, data) - } - - pub fn existing_txnid( - &self, - user_id: &UserId, - device_id: Option<&DeviceId>, - txn_id: &TransactionId, - ) -> Result>> { - self.db.existing_txnid(user_id, device_id, txn_id) - } -} diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa.rs similarity index 51% rename from src/service/uiaa/mod.rs rename to src/service/uiaa.rs index ed39af99..566db934 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa.rs @@ -1,37 
+1,59 @@ -mod data; - -pub use data::Data; +use std::{collections::BTreeMap, sync::RwLock}; use ruma::{ api::client::{ error::ErrorKind, uiaa::{AuthData, AuthType, Password, UiaaInfo, UserIdentifier}, }, - CanonicalJsonValue, DeviceId, UserId, + CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedUserId, UserId, }; use tracing::error; -use crate::{api::client_server::SESSION_ID_LENGTH, services, utils, Error, Result}; +use crate::{ + api::client_server::SESSION_ID_LENGTH, services, utils, Error, Result, +}; -pub struct Service { - pub db: &'static dyn Data, +mod data; + +pub(crate) use data::Data; + +pub(crate) struct Service { + db: &'static dyn Data, + userdevicesessionid_uiaarequest: RwLock< + BTreeMap<(OwnedUserId, OwnedDeviceId, String), CanonicalJsonValue>, + >, } impl Service { + pub(crate) fn new(db: &'static dyn Data) -> Self { + Self { + db, + userdevicesessionid_uiaarequest: RwLock::new(BTreeMap::new()), + } + } + /// Creates a new Uiaa session. Make sure the session token is unique. - pub fn create( + pub(crate) fn create( &self, user_id: &UserId, device_id: &DeviceId, uiaainfo: &UiaaInfo, json_body: &CanonicalJsonValue, ) -> Result<()> { - self.db.set_uiaa_request( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session should be set"), // TODO: better session error handling (why is it optional in ruma?) - json_body, - )?; + self.userdevicesessionid_uiaarequest.write().unwrap().insert( + ( + user_id.to_owned(), + device_id.to_owned(), + // TODO: better session error handling (why is it optional in + // ruma?) 
+ uiaainfo + .session + .as_ref() + .expect("session should be set") + .to_owned(), + ), + json_body.to_owned(), + ); self.db.update_uiaa_session( user_id, device_id, @@ -40,17 +62,17 @@ impl Service { ) } - pub fn try_auth( + pub(crate) fn try_auth( &self, user_id: &UserId, device_id: &DeviceId, auth: &AuthData, uiaainfo: &UiaaInfo, ) -> Result<(bool, UiaaInfo)> { - let mut uiaainfo = auth - .session() - .map(|session| self.db.get_uiaa_session(user_id, device_id, session)) - .unwrap_or_else(|| Ok(uiaainfo.clone()))?; + let mut uiaainfo = auth.session().map_or_else( + || Ok(uiaainfo.clone()), + |session| self.db.get_uiaa_session(user_id, device_id, session), + )?; if uiaainfo.session.is_none() { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); @@ -63,32 +85,34 @@ impl Service { password, .. }) => { - let username = match identifier { - UserIdentifier::UserIdOrLocalpart(username) => username, - _ => { - return Err(Error::BadRequest( - ErrorKind::Unrecognized, - "Identifier type not recognized.", - )) - } + let UserIdentifier::UserIdOrLocalpart(username) = identifier + else { + return Err(Error::BadRequest( + ErrorKind::Unrecognized, + "Identifier type not recognized.", + )); }; let user_id = UserId::parse_with_server_name( username.clone(), services().globals.server_name(), ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid."))?; + .map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "User ID is invalid.", + ) + })?; // Check if password is correct if let Some(hash) = services().users.password_hash(&user_id)? 
{ - let hash_matches = - argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); - - if !hash_matches { - uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody { - kind: ErrorKind::Forbidden, - message: "Invalid username or password.".to_owned(), - }); + if !utils::verify_password(hash, password) { + uiaainfo.auth_error = + Some(ruma::api::client::error::StandardErrorBody { + kind: ErrorKind::forbidden(), + message: "Invalid username or password." + .to_owned(), + }); return Ok((false, uiaainfo)); } } @@ -97,20 +121,23 @@ impl Service { uiaainfo.completed.push(AuthType::Password); } AuthData::RegistrationToken(t) => { - if Some(t.token.trim()) == services().globals.config.registration_token.as_deref() { + if Some(t.token.trim()) + == services().globals.config.registration_token.as_deref() + { uiaainfo.completed.push(AuthType::RegistrationToken); } else { - uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody { - kind: ErrorKind::Forbidden, - message: "Invalid registration token.".to_owned(), - }); + uiaainfo.auth_error = + Some(ruma::api::client::error::StandardErrorBody { + kind: ErrorKind::forbidden(), + message: "Invalid registration token.".to_owned(), + }); return Ok((false, uiaainfo)); } } AuthData::Dummy(_) => { uiaainfo.completed.push(AuthType::Dummy); } - k => error!("type not supported: {:?}", k), + kind => error!(?kind, "Auth kind not supported"), } // Check if a flow now succeeds @@ -145,12 +172,20 @@ impl Service { Ok((true, uiaainfo)) } - pub fn get_uiaa_request( + pub(crate) fn get_uiaa_request( &self, user_id: &UserId, device_id: &DeviceId, session: &str, ) -> Option { - self.db.get_uiaa_request(user_id, device_id, session) + self.userdevicesessionid_uiaarequest + .read() + .unwrap() + .get(&( + user_id.to_owned(), + device_id.to_owned(), + session.to_owned(), + )) + .map(ToOwned::to_owned) } } diff --git a/src/service/uiaa/data.rs b/src/service/uiaa/data.rs index c64deb90..2af82e11 100644 --- 
a/src/service/uiaa/data.rs +++ b/src/service/uiaa/data.rs @@ -1,22 +1,8 @@ +use ruma::{api::client::uiaa::UiaaInfo, DeviceId, UserId}; + use crate::Result; -use ruma::{api::client::uiaa::UiaaInfo, CanonicalJsonValue, DeviceId, UserId}; - -pub trait Data: Send + Sync { - fn set_uiaa_request( - &self, - user_id: &UserId, - device_id: &DeviceId, - session: &str, - request: &CanonicalJsonValue, - ) -> Result<()>; - - fn get_uiaa_request( - &self, - user_id: &UserId, - device_id: &DeviceId, - session: &str, - ) -> Option; +pub(crate) trait Data: Send + Sync { fn update_uiaa_session( &self, user_id: &UserId, diff --git a/src/service/users.rs b/src/service/users.rs new file mode 100644 index 00000000..dbacb715 --- /dev/null +++ b/src/service/users.rs @@ -0,0 +1,461 @@ +use std::{collections::BTreeMap, mem}; + +use ruma::{ + api::client::{device::Device, filter::FilterDefinition}, + encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, + events::AnyToDeviceEvent, + serde::Raw, + DeviceId, OneTimeKeyAlgorithm, OneTimeKeyName, OwnedDeviceId, OwnedKeyId, + OwnedMxcUri, OwnedOneTimeKeyId, OwnedUserId, UInt, UserId, +}; + +use crate::{services, Error, Result}; + +mod data; + +pub(crate) use data::Data; + +pub(crate) struct Service { + pub(crate) db: &'static dyn Data, +} + +impl Service { + pub(crate) fn new(db: &'static dyn Data) -> Self { + Self { + db, + } + } + + /// Check if a user has an account on this homeserver. 
+ pub(crate) fn exists(&self, user_id: &UserId) -> Result { + self.db.exists(user_id) + } + + /// Check if account is deactivated + pub(crate) fn is_deactivated(&self, user_id: &UserId) -> Result { + self.db.is_deactivated(user_id) + } + + /// Check if a user is an admin + // Allowed because this function uses `services()` + #[allow(clippy::unused_self)] + pub(crate) fn is_admin(&self, user_id: &UserId) -> Result { + services().admin.get_admin_room()?.map_or(Ok(false), |admin_room_id| { + services().rooms.state_cache.is_joined(user_id, &admin_room_id) + }) + } + + /// Create a new user account on this homeserver. + pub(crate) fn create( + &self, + user_id: &UserId, + password: Option<&str>, + ) -> Result<()> { + self.db.set_password(user_id, password)?; + Ok(()) + } + + /// Returns the number of local and remote users known by this server. + pub(crate) fn count(&self) -> Result { + self.db.count() + } + + /// Find out which user an access token belongs to. + pub(crate) fn find_from_token( + &self, + token: &str, + ) -> Result> { + self.db.find_from_token(token) + } + + /// Returns an iterator over all local and remote users on this homeserver. + pub(crate) fn iter( + &self, + ) -> impl Iterator> + '_ { + self.db.iter() + } + + /// Returns a list of local users as list of usernames. + /// + /// A user account is considered `local` if the length of it's password is + /// greater then zero. + pub(crate) fn list_local_users(&self) -> Result> { + self.db.list_local_users() + } + + /// Returns the password hash for the given user. + pub(crate) fn password_hash( + &self, + user_id: &UserId, + ) -> Result> { + self.db.password_hash(user_id) + } + + /// Hash and set the user's password to the Argon2 hash + pub(crate) fn set_password( + &self, + user_id: &UserId, + password: Option<&str>, + ) -> Result<()> { + self.db.set_password(user_id, password) + } + + /// Returns the displayname of a user on this homeserver. 
+ pub(crate) fn displayname( + &self, + user_id: &UserId, + ) -> Result> { + self.db.displayname(user_id) + } + + /// Sets a new displayname or removes it if displayname is None. You still + /// need to nofify all rooms of this change. + pub(crate) fn set_displayname( + &self, + user_id: &UserId, + displayname: Option, + ) -> Result<()> { + self.db.set_displayname(user_id, displayname) + } + + /// Get the `avatar_url` of a user. + pub(crate) fn avatar_url( + &self, + user_id: &UserId, + ) -> Result> { + self.db.avatar_url(user_id) + } + + /// Sets a new `avatar_url` or removes it if `avatar_url` is `None`. + pub(crate) fn set_avatar_url( + &self, + user_id: &UserId, + avatar_url: Option, + ) -> Result<()> { + self.db.set_avatar_url(user_id, avatar_url) + } + + /// Get the `blurhash` of a user. + pub(crate) fn blurhash(&self, user_id: &UserId) -> Result> { + self.db.blurhash(user_id) + } + + /// Sets a new `avatar_url` or removes it if `avatar_url` is `None`. + pub(crate) fn set_blurhash( + &self, + user_id: &UserId, + blurhash: Option, + ) -> Result<()> { + self.db.set_blurhash(user_id, blurhash) + } + + /// Adds a new device to a user. + pub(crate) fn create_device( + &self, + user_id: &UserId, + device_id: &DeviceId, + token: &str, + initial_device_display_name: Option, + ) -> Result<()> { + self.db.create_device( + user_id, + device_id, + token, + initial_device_display_name, + ) + } + + /// Removes a device from a user. + pub(crate) fn remove_device( + &self, + user_id: &UserId, + device_id: &DeviceId, + ) -> Result<()> { + self.db.remove_device(user_id, device_id) + } + + /// Returns an iterator over all device ids of this user. + pub(crate) fn all_device_ids<'a>( + &'a self, + user_id: &UserId, + ) -> impl Iterator> + 'a { + self.db.all_device_ids(user_id) + } + + /// Replaces the access token of one device. 
+ pub(crate) fn set_token( + &self, + user_id: &UserId, + device_id: &DeviceId, + token: &str, + ) -> Result<()> { + self.db.set_token(user_id, device_id, token) + } + + pub(crate) fn add_one_time_key( + &self, + user_id: &UserId, + device_id: &DeviceId, + one_time_key_key: &OwnedKeyId, + one_time_key_value: &Raw, + ) -> Result<()> { + self.db.add_one_time_key( + user_id, + device_id, + one_time_key_key, + one_time_key_value, + ) + } + + pub(crate) fn take_one_time_key( + &self, + user_id: &UserId, + device_id: &DeviceId, + key_algorithm: &OneTimeKeyAlgorithm, + ) -> Result)>> { + self.db.take_one_time_key(user_id, device_id, key_algorithm) + } + + pub(crate) fn count_one_time_keys( + &self, + user_id: &UserId, + device_id: &DeviceId, + ) -> Result> { + self.db.count_one_time_keys(user_id, device_id) + } + + pub(crate) fn add_device_keys( + &self, + user_id: &UserId, + device_id: &DeviceId, + device_keys: &Raw, + ) -> Result<()> { + self.db.add_device_keys(user_id, device_id, device_keys) + } + + pub(crate) fn add_cross_signing_keys( + &self, + user_id: &UserId, + master_key: &Raw, + self_signing_key: Option<&Raw>, + user_signing_key: Option<&Raw>, + notify: bool, + ) -> Result<()> { + self.db.add_cross_signing_keys( + user_id, + master_key, + self_signing_key, + user_signing_key, + notify, + ) + } + + pub(crate) fn sign_key( + &self, + target_id: &UserId, + key_id: &str, + signature: (String, String), + sender_id: &UserId, + ) -> Result<()> { + self.db.sign_key(target_id, key_id, signature, sender_id) + } + + pub(crate) fn keys_changed<'a>( + &'a self, + user_or_room_id: &str, + from: u64, + to: Option, + ) -> impl Iterator> + 'a { + self.db.keys_changed(user_or_room_id, from, to) + } + + pub(crate) fn mark_device_key_update( + &self, + user_id: &UserId, + ) -> Result<()> { + self.db.mark_device_key_update(user_id) + } + + pub(crate) fn get_device_keys( + &self, + user_id: &UserId, + device_id: &DeviceId, + ) -> Result>> { + self.db.get_device_keys(user_id, 
device_id) + } + + pub(crate) fn parse_master_key( + &self, + user_id: &UserId, + master_key: &Raw, + ) -> Result<(Vec, CrossSigningKey)> { + self.db.parse_master_key(user_id, master_key) + } + + pub(crate) fn get_key( + &self, + key: &[u8], + sender_user: Option<&UserId>, + user_id: &UserId, + allowed_signatures: &dyn Fn(&UserId) -> bool, + ) -> Result>> { + self.db.get_key(key, sender_user, user_id, allowed_signatures) + } + + pub(crate) fn get_master_key( + &self, + sender_user: Option<&UserId>, + user_id: &UserId, + allowed_signatures: &dyn Fn(&UserId) -> bool, + ) -> Result>> { + self.db.get_master_key(sender_user, user_id, allowed_signatures) + } + + pub(crate) fn get_self_signing_key( + &self, + sender_user: Option<&UserId>, + user_id: &UserId, + allowed_signatures: &dyn Fn(&UserId) -> bool, + ) -> Result>> { + self.db.get_self_signing_key(sender_user, user_id, allowed_signatures) + } + + pub(crate) fn get_user_signing_key( + &self, + user_id: &UserId, + ) -> Result>> { + self.db.get_user_signing_key(user_id) + } + + pub(crate) fn add_to_device_event( + &self, + sender: &UserId, + target_user_id: &UserId, + target_device_id: &DeviceId, + event_type: &str, + content: serde_json::Value, + ) -> Result<()> { + self.db.add_to_device_event( + sender, + target_user_id, + target_device_id, + event_type, + content, + ) + } + + pub(crate) fn get_to_device_events( + &self, + user_id: &UserId, + device_id: &DeviceId, + ) -> Result>> { + self.db.get_to_device_events(user_id, device_id) + } + + pub(crate) fn remove_to_device_events( + &self, + user_id: &UserId, + device_id: &DeviceId, + until: u64, + ) -> Result<()> { + self.db.remove_to_device_events(user_id, device_id, until) + } + + pub(crate) fn update_device_metadata( + &self, + user_id: &UserId, + device_id: &DeviceId, + device: &Device, + ) -> Result<()> { + self.db.update_device_metadata(user_id, device_id, device) + } + + /// Get device metadata. 
+ pub(crate) fn get_device_metadata( + &self, + user_id: &UserId, + device_id: &DeviceId, + ) -> Result> { + self.db.get_device_metadata(user_id, device_id) + } + + pub(crate) fn get_devicelist_version( + &self, + user_id: &UserId, + ) -> Result> { + self.db.get_devicelist_version(user_id) + } + + pub(crate) fn all_devices_metadata<'a>( + &'a self, + user_id: &UserId, + ) -> impl Iterator> + 'a { + self.db.all_devices_metadata(user_id) + } + + /// Deactivate account + pub(crate) fn deactivate_account(&self, user_id: &UserId) -> Result<()> { + // Remove all associated devices + for device_id in self.all_device_ids(user_id) { + self.remove_device(user_id, &device_id?)?; + } + + // Set the password to "" to indicate a deactivated account. Hashes will + // never result in an empty string, so the user will not be able + // to log in again. Systems like changing the password without + // logging in should check if the account is deactivated. + self.db.set_password(user_id, None)?; + + // TODO: Unhook 3PID + Ok(()) + } + + /// Creates a new sync filter. Returns the filter id. 
+ pub(crate) fn create_filter( + &self, + user_id: &UserId, + filter: &FilterDefinition, + ) -> Result { + self.db.create_filter(user_id, filter) + } + + pub(crate) fn get_filter( + &self, + user_id: &UserId, + filter_id: &str, + ) -> Result> { + self.db.get_filter(user_id, filter_id) + } +} + +/// Ensure that a user only sees signatures from themselves and the target user +pub(crate) fn clean_signatures bool>( + cross_signing_key: &mut serde_json::Value, + sender_user: Option<&UserId>, + user_id: &UserId, + allowed_signatures: F, +) -> Result<(), Error> { + if let Some(signatures) = + cross_signing_key.get_mut("signatures").and_then(|v| v.as_object_mut()) + { + // Don't allocate for the full size of the current signatures, but + // require at most one resize if nothing is dropped + let new_capacity = signatures.len() / 2; + for (user, signature) in mem::replace( + signatures, + serde_json::Map::with_capacity(new_capacity), + ) { + let sid = <&UserId>::try_from(user.as_str()).map_err(|_| { + Error::bad_database("Invalid user ID in database.") + })?; + if sender_user == Some(user_id) + || sid == user_id + || allowed_signatures(sid) + { + signatures.insert(user, signature); + } + } + } + + Ok(()) +} diff --git a/src/service/users/data.rs b/src/service/users/data.rs index ddf941e3..579bdfb0 100644 --- a/src/service/users/data.rs +++ b/src/service/users/data.rs @@ -1,58 +1,82 @@ -use crate::Result; +use std::collections::BTreeMap; + use ruma::{ api::client::{device::Device, filter::FilterDefinition}, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::AnyToDeviceEvent, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri, - OwnedUserId, UInt, UserId, + DeviceId, OneTimeKeyAlgorithm, OneTimeKeyName, OwnedDeviceId, OwnedKeyId, + OwnedMxcUri, OwnedOneTimeKeyId, OwnedUserId, UInt, UserId, }; -use std::collections::BTreeMap; -pub trait Data: Send + Sync { +use crate::Result; + +pub(crate) trait Data: Send + Sync { 
/// Check if a user has an account on this homeserver. fn exists(&self, user_id: &UserId) -> Result; /// Check if account is deactivated fn is_deactivated(&self, user_id: &UserId) -> Result; - /// Returns the number of users registered on this server. + /// Returns the number of local and remote users known by this server. fn count(&self) -> Result; /// Find out which user an access token belongs to. - fn find_from_token(&self, token: &str) -> Result>; + fn find_from_token( + &self, + token: &str, + ) -> Result>; - /// Returns an iterator over all users on this homeserver. - fn iter<'a>(&'a self) -> Box> + 'a>; + /// Returns an iterator over all local and remote users on this homeserver. + fn iter<'a>(&'a self) + -> Box> + 'a>; /// Returns a list of local users as list of usernames. /// - /// A user account is considered `local` if the length of it's password is greater then zero. + /// A user account is considered `local` if the length of it's password is + /// greater then zero. fn list_local_users(&self) -> Result>; /// Returns the password hash for the given user. fn password_hash(&self, user_id: &UserId) -> Result>; /// Hash and set the user's password to the Argon2 hash - fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()>; + fn set_password( + &self, + user_id: &UserId, + password: Option<&str>, + ) -> Result<()>; /// Returns the displayname of a user on this homeserver. fn displayname(&self, user_id: &UserId) -> Result>; - /// Sets a new displayname or removes it if displayname is None. You still need to nofify all rooms of this change. - fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()>; + /// Sets a new `displayname` or removes it if `displayname` is `None`. You + /// still need to nofify all rooms of this change. + fn set_displayname( + &self, + user_id: &UserId, + displayname: Option, + ) -> Result<()>; - /// Get the avatar_url of a user. + /// Get the `avatar_url` of a user. 
fn avatar_url(&self, user_id: &UserId) -> Result>; - /// Sets a new avatar_url or removes it if avatar_url is None. - fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) -> Result<()>; + /// Sets a new `avatar_url` or removes it if `avatar_url` is `None`. + fn set_avatar_url( + &self, + user_id: &UserId, + avatar_url: Option, + ) -> Result<()>; - /// Get the blurhash of a user. + /// Get the `blurhash` of a user. fn blurhash(&self, user_id: &UserId) -> Result>; - /// Sets a new avatar_url or removes it if avatar_url is None. - fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()>; + /// Sets a new `avatar_url` or removes it if `avatar_url` is `None`. + fn set_blurhash( + &self, + user_id: &UserId, + blurhash: Option, + ) -> Result<()>; /// Adds a new device to a user. fn create_device( @@ -64,7 +88,11 @@ pub trait Data: Send + Sync { ) -> Result<()>; /// Removes a device from a user. - fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()>; + fn remove_device( + &self, + user_id: &UserId, + device_id: &DeviceId, + ) -> Result<()>; /// Returns an iterator over all device ids of this user. fn all_device_ids<'a>( @@ -73,30 +101,33 @@ pub trait Data: Send + Sync { ) -> Box> + 'a>; /// Replaces the access token of one device. 
- fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()>; + fn set_token( + &self, + user_id: &UserId, + device_id: &DeviceId, + token: &str, + ) -> Result<()>; fn add_one_time_key( &self, user_id: &UserId, device_id: &DeviceId, - one_time_key_key: &DeviceKeyId, + one_time_key_key: &OwnedKeyId, one_time_key_value: &Raw, ) -> Result<()>; - fn last_one_time_keys_update(&self, user_id: &UserId) -> Result; - fn take_one_time_key( &self, user_id: &UserId, device_id: &DeviceId, - key_algorithm: &DeviceKeyAlgorithm, - ) -> Result)>>; + key_algorithm: &OneTimeKeyAlgorithm, + ) -> Result)>>; fn count_one_time_keys( &self, user_id: &UserId, device_id: &DeviceId, - ) -> Result>; + ) -> Result>; fn add_device_keys( &self, @@ -109,8 +140,8 @@ pub trait Data: Send + Sync { &self, user_id: &UserId, master_key: &Raw, - self_signing_key: &Option>, - user_signing_key: &Option>, + self_signing_key: Option<&Raw>, + user_signing_key: Option<&Raw>, notify: bool, ) -> Result<()>; @@ -165,7 +196,10 @@ pub trait Data: Send + Sync { allowed_signatures: &dyn Fn(&UserId) -> bool, ) -> Result>>; - fn get_user_signing_key(&self, user_id: &UserId) -> Result>>; + fn get_user_signing_key( + &self, + user_id: &UserId, + ) -> Result>>; fn add_to_device_event( &self, @@ -197,8 +231,11 @@ pub trait Data: Send + Sync { ) -> Result<()>; /// Get device metadata. - fn get_device_metadata(&self, user_id: &UserId, device_id: &DeviceId) - -> Result>; + fn get_device_metadata( + &self, + user_id: &UserId, + device_id: &DeviceId, + ) -> Result>; fn get_devicelist_version(&self, user_id: &UserId) -> Result>; @@ -208,7 +245,15 @@ pub trait Data: Send + Sync { ) -> Box> + 'a>; /// Creates a new sync filter. Returns the filter id. 
- fn create_filter(&self, user_id: &UserId, filter: &FilterDefinition) -> Result; + fn create_filter( + &self, + user_id: &UserId, + filter: &FilterDefinition, + ) -> Result; - fn get_filter(&self, user_id: &UserId, filter_id: &str) -> Result>; + fn get_filter( + &self, + user_id: &UserId, + filter_id: &str, + ) -> Result>; } diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs deleted file mode 100644 index fb983a41..00000000 --- a/src/service/users/mod.rs +++ /dev/null @@ -1,627 +0,0 @@ -mod data; -use std::{ - collections::{BTreeMap, BTreeSet}, - mem, - sync::{Arc, Mutex}, -}; - -pub use data::Data; -use ruma::{ - api::client::{ - device::Device, - error::ErrorKind, - filter::FilterDefinition, - sync::sync_events::{ - self, - v4::{ExtensionsConfig, SyncRequestList}, - }, - }, - encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, - events::AnyToDeviceEvent, - serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri, - OwnedRoomId, OwnedUserId, RoomAliasId, UInt, UserId, -}; - -use crate::{services, Error, Result}; - -pub struct SlidingSyncCache { - lists: BTreeMap, - subscriptions: BTreeMap, - known_rooms: BTreeMap>, // For every room, the roomsince number - extensions: ExtensionsConfig, -} - -pub struct Service { - pub db: &'static dyn Data, - #[allow(clippy::type_complexity)] - pub connections: - Mutex>>>, -} - -impl Service { - /// Check if a user has an account on this homeserver. 
- pub fn exists(&self, user_id: &UserId) -> Result { - self.db.exists(user_id) - } - - pub fn forget_sync_request_connection( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - conn_id: String, - ) { - self.connections - .lock() - .unwrap() - .remove(&(user_id, device_id, conn_id)); - } - - pub fn update_sync_request_with_cache( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - request: &mut sync_events::v4::Request, - ) -> BTreeMap> { - let Some(conn_id) = request.conn_id.clone() else { - return BTreeMap::new(); - }; - - let mut cache = self.connections.lock().unwrap(); - let cached = Arc::clone( - cache - .entry((user_id, device_id, conn_id)) - .or_insert_with(|| { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - }), - ); - let cached = &mut cached.lock().unwrap(); - drop(cache); - - for (list_id, list) in &mut request.lists { - if let Some(cached_list) = cached.lists.get(list_id) { - if list.sort.is_empty() { - list.sort = cached_list.sort.clone(); - }; - if list.room_details.required_state.is_empty() { - list.room_details.required_state = - cached_list.room_details.required_state.clone(); - }; - list.room_details.timeline_limit = list - .room_details - .timeline_limit - .or(cached_list.room_details.timeline_limit); - list.include_old_rooms = list - .include_old_rooms - .clone() - .or(cached_list.include_old_rooms.clone()); - match (&mut list.filters, cached_list.filters.clone()) { - (Some(list_filters), Some(cached_filters)) => { - list_filters.is_dm = list_filters.is_dm.or(cached_filters.is_dm); - if list_filters.spaces.is_empty() { - list_filters.spaces = cached_filters.spaces; - } - list_filters.is_encrypted = - list_filters.is_encrypted.or(cached_filters.is_encrypted); - list_filters.is_invite = - list_filters.is_invite.or(cached_filters.is_invite); - if list_filters.room_types.is_empty() { - 
list_filters.room_types = cached_filters.room_types; - } - if list_filters.not_room_types.is_empty() { - list_filters.not_room_types = cached_filters.not_room_types; - } - list_filters.room_name_like = list_filters - .room_name_like - .clone() - .or(cached_filters.room_name_like); - if list_filters.tags.is_empty() { - list_filters.tags = cached_filters.tags; - } - if list_filters.not_tags.is_empty() { - list_filters.not_tags = cached_filters.not_tags; - } - } - (_, Some(cached_filters)) => list.filters = Some(cached_filters), - (Some(list_filters), _) => list.filters = Some(list_filters.clone()), - (_, _) => {} - } - if list.bump_event_types.is_empty() { - list.bump_event_types = cached_list.bump_event_types.clone(); - }; - } - cached.lists.insert(list_id.clone(), list.clone()); - } - - cached.subscriptions.extend( - request - .room_subscriptions - .iter() - .map(|(k, v)| (k.clone(), v.clone())), - ); - request.room_subscriptions.extend( - cached - .subscriptions - .iter() - .map(|(k, v)| (k.clone(), v.clone())), - ); - - request.extensions.e2ee.enabled = request - .extensions - .e2ee - .enabled - .or(cached.extensions.e2ee.enabled); - - request.extensions.to_device.enabled = request - .extensions - .to_device - .enabled - .or(cached.extensions.to_device.enabled); - - request.extensions.account_data.enabled = request - .extensions - .account_data - .enabled - .or(cached.extensions.account_data.enabled); - request.extensions.account_data.lists = request - .extensions - .account_data - .lists - .clone() - .or(cached.extensions.account_data.lists.clone()); - request.extensions.account_data.rooms = request - .extensions - .account_data - .rooms - .clone() - .or(cached.extensions.account_data.rooms.clone()); - - cached.extensions = request.extensions.clone(); - - cached.known_rooms.clone() - } - - pub fn update_sync_subscriptions( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - conn_id: String, - subscriptions: BTreeMap, - ) { - let mut cache = 
self.connections.lock().unwrap(); - let cached = Arc::clone( - cache - .entry((user_id, device_id, conn_id)) - .or_insert_with(|| { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - }), - ); - let cached = &mut cached.lock().unwrap(); - drop(cache); - - cached.subscriptions = subscriptions; - } - - pub fn update_sync_known_rooms( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - conn_id: String, - list_id: String, - new_cached_rooms: BTreeSet, - globalsince: u64, - ) { - let mut cache = self.connections.lock().unwrap(); - let cached = Arc::clone( - cache - .entry((user_id, device_id, conn_id)) - .or_insert_with(|| { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - }), - ); - let cached = &mut cached.lock().unwrap(); - drop(cache); - - for (roomid, lastsince) in cached - .known_rooms - .entry(list_id.clone()) - .or_default() - .iter_mut() - { - if !new_cached_rooms.contains(roomid) { - *lastsince = 0; - } - } - let list = cached.known_rooms.entry(list_id).or_default(); - for roomid in new_cached_rooms { - list.insert(roomid, globalsince); - } - } - - /// Check if account is deactivated - pub fn is_deactivated(&self, user_id: &UserId) -> Result { - self.db.is_deactivated(user_id) - } - - /// Check if a user is an admin - pub fn is_admin(&self, user_id: &UserId) -> Result { - let admin_room_alias_id = - RoomAliasId::parse(format!("#admins:{}", services().globals.server_name())) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; - let admin_room_id = services() - .rooms - .alias - .resolve_local_alias(&admin_room_alias_id)? 
- .unwrap(); - - services() - .rooms - .state_cache - .is_joined(user_id, &admin_room_id) - } - - /// Create a new user account on this homeserver. - pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { - self.db.set_password(user_id, password)?; - Ok(()) - } - - /// Returns the number of users registered on this server. - pub fn count(&self) -> Result { - self.db.count() - } - - /// Find out which user an access token belongs to. - pub fn find_from_token(&self, token: &str) -> Result> { - self.db.find_from_token(token) - } - - /// Returns an iterator over all users on this homeserver. - pub fn iter(&self) -> impl Iterator> + '_ { - self.db.iter() - } - - /// Returns a list of local users as list of usernames. - /// - /// A user account is considered `local` if the length of it's password is greater then zero. - pub fn list_local_users(&self) -> Result> { - self.db.list_local_users() - } - - /// Returns the password hash for the given user. - pub fn password_hash(&self, user_id: &UserId) -> Result> { - self.db.password_hash(user_id) - } - - /// Hash and set the user's password to the Argon2 hash - pub fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { - self.db.set_password(user_id, password) - } - - /// Returns the displayname of a user on this homeserver. - pub fn displayname(&self, user_id: &UserId) -> Result> { - self.db.displayname(user_id) - } - - /// Sets a new displayname or removes it if displayname is None. You still need to nofify all rooms of this change. - pub fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()> { - self.db.set_displayname(user_id, displayname) - } - - /// Get the avatar_url of a user. - pub fn avatar_url(&self, user_id: &UserId) -> Result> { - self.db.avatar_url(user_id) - } - - /// Sets a new avatar_url or removes it if avatar_url is None. 
- pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) -> Result<()> { - self.db.set_avatar_url(user_id, avatar_url) - } - - /// Get the blurhash of a user. - pub fn blurhash(&self, user_id: &UserId) -> Result> { - self.db.blurhash(user_id) - } - - /// Sets a new avatar_url or removes it if avatar_url is None. - pub fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()> { - self.db.set_blurhash(user_id, blurhash) - } - - /// Adds a new device to a user. - pub fn create_device( - &self, - user_id: &UserId, - device_id: &DeviceId, - token: &str, - initial_device_display_name: Option, - ) -> Result<()> { - self.db - .create_device(user_id, device_id, token, initial_device_display_name) - } - - /// Removes a device from a user. - pub fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { - self.db.remove_device(user_id, device_id) - } - - /// Returns an iterator over all device ids of this user. - pub fn all_device_ids<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator> + 'a { - self.db.all_device_ids(user_id) - } - - /// Replaces the access token of one device. 
- pub fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { - self.db.set_token(user_id, device_id, token) - } - - pub fn add_one_time_key( - &self, - user_id: &UserId, - device_id: &DeviceId, - one_time_key_key: &DeviceKeyId, - one_time_key_value: &Raw, - ) -> Result<()> { - self.db - .add_one_time_key(user_id, device_id, one_time_key_key, one_time_key_value) - } - - pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { - self.db.last_one_time_keys_update(user_id) - } - - pub fn take_one_time_key( - &self, - user_id: &UserId, - device_id: &DeviceId, - key_algorithm: &DeviceKeyAlgorithm, - ) -> Result)>> { - self.db.take_one_time_key(user_id, device_id, key_algorithm) - } - - pub fn count_one_time_keys( - &self, - user_id: &UserId, - device_id: &DeviceId, - ) -> Result> { - self.db.count_one_time_keys(user_id, device_id) - } - - pub fn add_device_keys( - &self, - user_id: &UserId, - device_id: &DeviceId, - device_keys: &Raw, - ) -> Result<()> { - self.db.add_device_keys(user_id, device_id, device_keys) - } - - pub fn add_cross_signing_keys( - &self, - user_id: &UserId, - master_key: &Raw, - self_signing_key: &Option>, - user_signing_key: &Option>, - notify: bool, - ) -> Result<()> { - self.db.add_cross_signing_keys( - user_id, - master_key, - self_signing_key, - user_signing_key, - notify, - ) - } - - pub fn sign_key( - &self, - target_id: &UserId, - key_id: &str, - signature: (String, String), - sender_id: &UserId, - ) -> Result<()> { - self.db.sign_key(target_id, key_id, signature, sender_id) - } - - pub fn keys_changed<'a>( - &'a self, - user_or_room_id: &str, - from: u64, - to: Option, - ) -> impl Iterator> + 'a { - self.db.keys_changed(user_or_room_id, from, to) - } - - pub fn mark_device_key_update(&self, user_id: &UserId) -> Result<()> { - self.db.mark_device_key_update(user_id) - } - - pub fn get_device_keys( - &self, - user_id: &UserId, - device_id: &DeviceId, - ) -> Result>> { - 
self.db.get_device_keys(user_id, device_id) - } - - pub fn parse_master_key( - &self, - user_id: &UserId, - master_key: &Raw, - ) -> Result<(Vec, CrossSigningKey)> { - self.db.parse_master_key(user_id, master_key) - } - - pub fn get_key( - &self, - key: &[u8], - sender_user: Option<&UserId>, - user_id: &UserId, - allowed_signatures: &dyn Fn(&UserId) -> bool, - ) -> Result>> { - self.db - .get_key(key, sender_user, user_id, allowed_signatures) - } - - pub fn get_master_key( - &self, - sender_user: Option<&UserId>, - user_id: &UserId, - allowed_signatures: &dyn Fn(&UserId) -> bool, - ) -> Result>> { - self.db - .get_master_key(sender_user, user_id, allowed_signatures) - } - - pub fn get_self_signing_key( - &self, - sender_user: Option<&UserId>, - user_id: &UserId, - allowed_signatures: &dyn Fn(&UserId) -> bool, - ) -> Result>> { - self.db - .get_self_signing_key(sender_user, user_id, allowed_signatures) - } - - pub fn get_user_signing_key(&self, user_id: &UserId) -> Result>> { - self.db.get_user_signing_key(user_id) - } - - pub fn add_to_device_event( - &self, - sender: &UserId, - target_user_id: &UserId, - target_device_id: &DeviceId, - event_type: &str, - content: serde_json::Value, - ) -> Result<()> { - self.db.add_to_device_event( - sender, - target_user_id, - target_device_id, - event_type, - content, - ) - } - - pub fn get_to_device_events( - &self, - user_id: &UserId, - device_id: &DeviceId, - ) -> Result>> { - self.db.get_to_device_events(user_id, device_id) - } - - pub fn remove_to_device_events( - &self, - user_id: &UserId, - device_id: &DeviceId, - until: u64, - ) -> Result<()> { - self.db.remove_to_device_events(user_id, device_id, until) - } - - pub fn update_device_metadata( - &self, - user_id: &UserId, - device_id: &DeviceId, - device: &Device, - ) -> Result<()> { - self.db.update_device_metadata(user_id, device_id, device) - } - - /// Get device metadata. 
- pub fn get_device_metadata( - &self, - user_id: &UserId, - device_id: &DeviceId, - ) -> Result> { - self.db.get_device_metadata(user_id, device_id) - } - - pub fn get_devicelist_version(&self, user_id: &UserId) -> Result> { - self.db.get_devicelist_version(user_id) - } - - pub fn all_devices_metadata<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator> + 'a { - self.db.all_devices_metadata(user_id) - } - - /// Deactivate account - pub fn deactivate_account(&self, user_id: &UserId) -> Result<()> { - // Remove all associated devices - for device_id in self.all_device_ids(user_id) { - self.remove_device(user_id, &device_id?)?; - } - - // Set the password to "" to indicate a deactivated account. Hashes will never result in an - // empty string, so the user will not be able to log in again. Systems like changing the - // password without logging in should check if the account is deactivated. - self.db.set_password(user_id, None)?; - - // TODO: Unhook 3PID - Ok(()) - } - - /// Creates a new sync filter. Returns the filter id. 
- pub fn create_filter(&self, user_id: &UserId, filter: &FilterDefinition) -> Result { - self.db.create_filter(user_id, filter) - } - - pub fn get_filter( - &self, - user_id: &UserId, - filter_id: &str, - ) -> Result> { - self.db.get_filter(user_id, filter_id) - } -} - -/// Ensure that a user only sees signatures from themselves and the target user -pub fn clean_signatures bool>( - cross_signing_key: &mut serde_json::Value, - sender_user: Option<&UserId>, - user_id: &UserId, - allowed_signatures: F, -) -> Result<(), Error> { - if let Some(signatures) = cross_signing_key - .get_mut("signatures") - .and_then(|v| v.as_object_mut()) - { - // Don't allocate for the full size of the current signatures, but require - // at most one resize if nothing is dropped - let new_capacity = signatures.len() / 2; - for (user, signature) in - mem::replace(signatures, serde_json::Map::with_capacity(new_capacity)) - { - let sid = <&UserId>::try_from(user.as_str()) - .map_err(|_| Error::bad_database("Invalid user ID in database."))?; - if sender_user == Some(user_id) || sid == user_id || allowed_signatures(sid) { - signatures.insert(user, signature); - } - } - } - - Ok(()) -} diff --git a/src/utils.rs b/src/utils.rs new file mode 100644 index 00000000..fcecdb9c --- /dev/null +++ b/src/utils.rs @@ -0,0 +1,523 @@ +use std::{ + borrow::Cow, + cmp, fmt, + fmt::Write, + io, + path::{Component, Path, PathBuf}, + str::FromStr, + time::{SystemTime, UNIX_EPOCH}, +}; + +use argon2::{password_hash, Argon2, PasswordHasher, PasswordVerifier}; +use cmp::Ordering; +use rand::{prelude::*, rngs::OsRng}; +use ring::digest; +use ruma::{ + api::client::error::ErrorKind, canonical_json::try_from_json_map, + CanonicalJsonError, CanonicalJsonObject, MxcUri, MxcUriError, OwnedMxcUri, +}; +use tokio::fs; + +use crate::{Error, Result}; + +pub(crate) mod error; +pub(crate) mod on_demand_hashmap; +pub(crate) mod proxy_protocol; +pub(crate) mod room_version; + +// Hopefully we have a better chat protocol in 530 
years +#[allow(clippy::as_conversions, clippy::cast_possible_truncation)] +pub(crate) fn millis_since_unix_epoch() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("time is valid") + .as_millis() as u64 +} + +#[cfg(any(feature = "rocksdb", feature = "sqlite"))] +pub(crate) fn increment(old: Option<&[u8]>) -> Vec { + let number = match old.map(TryInto::try_into) { + Some(Ok(bytes)) => { + let number = u64::from_be_bytes(bytes); + number + 1 + } + // Start at one. since 0 should return the first event in the db + _ => 1, + }; + + number.to_be_bytes().to_vec() +} + +pub(crate) fn generate_keypair() -> Vec { + let mut value = random_string(8).as_bytes().to_vec(); + value.push(0xFF); + value.extend_from_slice( + &ruma::signatures::Ed25519KeyPair::generate() + .expect("Ed25519KeyPair generation always works (?)"), + ); + value +} + +/// Parses the bytes into an u64. +pub(crate) fn u64_from_bytes( + bytes: &[u8], +) -> Result { + let array: [u8; 8] = bytes.try_into()?; + Ok(u64::from_be_bytes(array)) +} + +/// Parses the bytes into a string. +pub(crate) fn string_from_bytes( + bytes: &[u8], +) -> Result { + String::from_utf8(bytes.to_vec()) +} + +pub(crate) fn random_string(length: usize) -> String { + thread_rng() + .sample_iter(&rand::distributions::Alphanumeric) + .take(length) + .map(char::from) + .collect() +} + +/// Hash the given password +pub(crate) fn hash_password( + password: B, +) -> Result +where + B: AsRef<[u8]>, +{ + Argon2::default() + .hash_password( + password.as_ref(), + &password_hash::SaltString::generate(&mut OsRng), + ) + .map(|x| x.serialize()) +} + +/// Compare a password to a hash +/// +/// Returns `true` if the password matches the hash, `false` otherwise. 
+pub(crate) fn verify_password(hash: S, password: B) -> bool +where + S: AsRef, + B: AsRef<[u8]>, +{ + let Ok(hash) = password_hash::PasswordHash::new(hash.as_ref()) else { + return false; + }; + + Argon2::default().verify_password(password.as_ref(), &hash).is_ok() +} + +#[tracing::instrument(skip(keys))] +pub(crate) fn calculate_hash(keys: I) -> Vec +where + I: IntoIterator, + T: AsRef<[u8]>, +{ + let mut bytes = Vec::new(); + for (i, key) in keys.into_iter().enumerate() { + if i != 0 { + bytes.push(0xFF); + } + bytes.extend_from_slice(key.as_ref()); + } + let hash = digest::digest(&digest::SHA256, &bytes); + hash.as_ref().to_owned() +} + +pub(crate) fn common_elements( + mut iterators: I, + check_order: F, +) -> Option> +where + I: Iterator, + I::Item: Iterator, + F: Fn(&T, &T) -> Ordering, +{ + let first_iterator = iterators.next()?; + let mut other_iterators = + iterators.map(Iterator::peekable).collect::>(); + + Some(first_iterator.filter(move |target| { + other_iterators.iter_mut().all(|it| { + while let Some(element) = it.peek() { + match check_order(element, target) { + // We went too far + Ordering::Greater => return false, + // Element is in both iters + Ordering::Equal => return true, + // Keep searching + Ordering::Less => { + it.next(); + } + } + } + false + }) + })) +} + +/// Fallible conversion from any value that implements `Serialize` to a +/// `CanonicalJsonObject`. +/// +/// `value` must serialize to an `serde_json::Value::Object`. +pub(crate) fn to_canonical_object( + value: T, +) -> Result { + use serde::ser::Error; + + match serde_json::to_value(value).map_err(CanonicalJsonError::SerDe)? 
{ + serde_json::Value::Object(map) => try_from_json_map(map), + _ => Err(CanonicalJsonError::SerDe(serde_json::Error::custom( + "Value must be an object", + ))), + } +} + +pub(crate) fn deserialize_from_str< + 'de, + D: serde::de::Deserializer<'de>, + T: FromStr, + E: fmt::Display, +>( + deserializer: D, +) -> Result { + struct Visitor, E>(std::marker::PhantomData); + impl, Err: fmt::Display> serde::de::Visitor<'_> + for Visitor + { + type Value = T; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(formatter, "a parsable string") + } + + fn visit_str(self, v: &str) -> Result + where + E: serde::de::Error, + { + v.parse().map_err(serde::de::Error::custom) + } + } + deserializer.deserialize_str(Visitor(std::marker::PhantomData)) +} + +/// Debug-formats the given slice, but only up to the first `max_len` elements. +/// Any further elements are replaced by an ellipsis. +/// +/// See also [`debug_slice_truncated()`], +pub(crate) struct TruncatedDebugSlice<'a, T> { + inner: &'a [T], + max_len: usize, +} + +impl fmt::Debug for TruncatedDebugSlice<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.inner.len() <= self.max_len { + write!(f, "{:?}", self.inner) + } else { + f.debug_list() + .entries(&self.inner[..self.max_len]) + .entry(&"...") + .finish() + } + } +} + +/// See [`TruncatedDebugSlice`]. Useful for `#[instrument]`: +/// +/// ```ignore +/// #[tracing::instrument(fields( +/// foos = debug_slice_truncated(foos, N) +/// ))] +/// ``` +pub(crate) fn debug_slice_truncated( + slice: &[T], + max_len: usize, +) -> tracing::field::DebugValue> { + tracing::field::debug(TruncatedDebugSlice { + inner: slice, + max_len, + }) +} + +/// Truncates a string to an approximate maximum length, replacing any extra +/// text with an ellipsis. +/// +/// Only to be used for informational purposes, exact semantics are unspecified. 
+pub(crate) fn dbg_truncate_str(s: &str, mut max_len: usize) -> Cow<'_, str> { + while max_len < s.len() && !s.is_char_boundary(max_len) { + max_len += 1; + } + + if s.len() <= max_len { + s.into() + } else { + #[allow(clippy::string_slice)] // we checked it's at a char boundary + format!("{}...", &s[..max_len]).into() + } +} + +/// Data that makes up an `mxc://` URL. +#[derive(Debug, Clone)] +pub(crate) struct MxcData<'a> { + pub(crate) server_name: &'a ruma::ServerName, + pub(crate) media_id: &'a str, +} + +impl<'a> MxcData<'a> { + pub(crate) fn new( + server_name: &'a ruma::ServerName, + media_id: &'a str, + ) -> Result { + if !media_id.bytes().all(|b| { + matches!(b, + b'0'..=b'9' | b'a'..=b'z' | b'A'..=b'Z' | b'-' | b'_' + ) + }) { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid MXC media id", + )); + } + + Ok(Self { + server_name, + media_id, + }) + } +} + +impl fmt::Display for MxcData<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "mxc://{}/{}", self.server_name, self.media_id) + } +} + +impl From> for OwnedMxcUri { + fn from(value: MxcData<'_>) -> Self { + value.to_string().into() + } +} + +impl<'a> TryFrom<&'a MxcUri> for MxcData<'a> { + type Error = MxcUriError; + + fn try_from(value: &'a MxcUri) -> Result { + Ok(Self::new(value.server_name()?, value.media_id()?) 
+ .expect("validated MxcUri should always be valid MxcData")) + } +} + +fn curlify_args(req: &http::Request) -> Option> { + let mut args = + vec!["curl".to_owned(), "-X".to_owned(), req.method().to_string()]; + + for (name, val) in req.headers() { + args.extend([ + "-H".to_owned(), + format!("{name}: {}", val.to_str().ok()?), + ]); + } + + let fix_uri = || { + if req.uri().scheme().is_some() { + return None; + } + if req.uri().authority().is_some() { + return None; + } + let mut parts = req.uri().clone().into_parts(); + + parts.scheme = Some(http::uri::Scheme::HTTPS); + + let host = + req.headers().get(http::header::HOST)?.to_str().ok()?.to_owned(); + parts.authority = + Some(http::uri::Authority::from_maybe_shared(host).ok()?); + + http::uri::Uri::from_parts(parts).ok() + }; + + let uri = if let Some(new_uri) = fix_uri() { + Cow::Owned(new_uri) + } else { + Cow::Borrowed(req.uri()) + }; + + args.push(uri.to_string()); + + Some(args) +} + +pub(crate) fn curlify(req: &http::Request) -> Option { + let args = curlify_args(req)?; + + Some( + args.into_iter() + .map(|arg| { + if arg.chars().all(|c| { + c.is_alphanumeric() || ['-', '_', ':', '/'].contains(&c) + }) { + arg + } else { + format!("'{}'", arg.replace('\'', "\\'")) + } + }) + .collect::>() + .join(" "), + ) +} + +/// Format a u8 slice as an uppercase hex string +/// +/// The output does not contain a leading `0x` nor any non-hex characters (e.g. +/// whitespace or commas do not appear in the output). +pub(crate) fn u8_slice_to_hex(slice: &[u8]) -> String { + slice.iter().fold(String::new(), |mut acc, x| { + write!(acc, "{x:02X}").expect("in-memory write should succeed"); + acc + }) +} + +/// Canonicalize a path where some components may not exist yet. +/// +/// It's assumed that non-existent components will be created as +/// directories. This should match the result of [`fs::canonicalize`] +/// _after_ calling [`fs::create_dir_all`] on `path`. 
+pub(crate) async fn partial_canonicalize(path: &Path) -> io::Result { + let mut ret = std::env::current_dir()?; + + let mut base_path = Cow::Borrowed(path); + let mut components = base_path.components(); + + while let Some(component) = components.next() { + match component { + Component::Prefix(_) | Component::RootDir => { + let component_path: &Path = component.as_ref(); + component_path.clone_into(&mut ret); + } + Component::CurDir => (), + Component::ParentDir => { + ret.pop(); + } + Component::Normal(p) => { + let component_path = ret.join(p); + match fs::symlink_metadata(&component_path).await { + // path is a symlink + Ok(metadata) if metadata.is_symlink() => { + let destination = + fs::read_link(&component_path).await?; + // iterate over the symlink destination components + // before continuing with the original path + base_path = + Cow::Owned(destination.join(components.as_path())); + components = base_path.components(); + } + // path exists, not a symlink + Ok(_) => { + ret.push(p); + } + // path does not exist + Err(error) if error.kind() == io::ErrorKind::NotFound => { + // assume a directory will be created here + ret.push(p); + } + Err(error) => return Err(error), + } + } + } + } + + Ok(ret) +} + +#[cfg(test)] +mod tests { + use tempfile::TempDir; + use tokio::fs; + + use crate::utils::{ + dbg_truncate_str, partial_canonicalize, u8_slice_to_hex, + }; + + #[test] + fn test_truncate_str() { + assert_eq!(dbg_truncate_str("short", 10), "short"); + assert_eq!(dbg_truncate_str("very long string", 10), "very long ..."); + assert_eq!(dbg_truncate_str("no info, only dots", 0), "..."); + assert_eq!(dbg_truncate_str("", 0), ""); + assert_eq!(dbg_truncate_str("unicöde", 5), "unicö..."); + let ok_hand = "👌🏽"; + assert_eq!(dbg_truncate_str(ok_hand, 1), "👌..."); + assert_eq!(dbg_truncate_str(ok_hand, ok_hand.len() - 1), "👌🏽"); + assert_eq!(dbg_truncate_str(ok_hand, ok_hand.len()), "👌🏽"); + } + + #[test] + fn test_slice_to_hex() { + assert_eq!(u8_slice_to_hex(&[]), 
""); + assert_eq!(u8_slice_to_hex(&[0]), "00"); + assert_eq!(u8_slice_to_hex(&[0xFF]), "FF"); + assert_eq!(u8_slice_to_hex(&[1, 2, 3, 4]), "01020304"); + assert_eq!( + u8_slice_to_hex(&[0x42; 100]), + "4242424242424242424242424242424242424242\ + 4242424242424242424242424242424242424242\ + 4242424242424242424242424242424242424242\ + 4242424242424242424242424242424242424242\ + 4242424242424242424242424242424242424242" + ); + } + + #[tokio::test] + async fn test_partial_canonicalize() { + let tmp_dir = + TempDir::with_prefix("test_partial_canonicalize").unwrap(); + let path = tmp_dir.path(); + + fs::create_dir(&path.join("dir")).await.unwrap(); + fs::symlink(path.join("dir"), path.join("absolute-link-to-dir")) + .await + .unwrap(); + fs::symlink("./dir", path.join("relative-link-to-dir")).await.unwrap(); + + assert_eq!(partial_canonicalize(path).await.unwrap(), path); + assert_eq!(partial_canonicalize(&path.join("./")).await.unwrap(), path); + assert_eq!( + partial_canonicalize(&path.join("dir/..")).await.unwrap(), + path + ); + assert_eq!( + partial_canonicalize(&path.join("absolute-link-to-dir")) + .await + .unwrap(), + path.join("dir") + ); + assert_eq!( + partial_canonicalize(&path.join("relative-link-to-dir")) + .await + .unwrap(), + path.join("dir") + ); + assert_eq!( + partial_canonicalize(&path.join("absolute-link-to-dir/new-dir")) + .await + .unwrap(), + path.join("dir/new-dir") + ); + assert_eq!( + partial_canonicalize( + &path.join("absolute-link-to-dir/new-dir/../..") + ) + .await + .unwrap(), + path, + ); + + tmp_dir.close().unwrap(); + } +} diff --git a/src/utils/error.rs b/src/utils/error.rs index 448f0665..9e36c3b3 100644 --- a/src/utils/error.rs +++ b/src/utils/error.rs @@ -9,60 +9,52 @@ use ruma::{ OwnedServerName, }; use thiserror::Error; -use tracing::{error, info}; +use tracing::{error, warn}; -#[cfg(feature = "persy")] -use persy::PersyError; +use crate::Ra; -use crate::RumaResponse; - -pub type Result = std::result::Result; +pub(crate) type 
Result = std::result::Result; #[derive(Error, Debug)] -pub enum Error { - #[cfg(feature = "sled")] - #[error("There was a problem with the connection to the sled database.")] - SledError { - #[from] - source: sled::Error, - }, +#[allow(clippy::error_impl_error)] +pub(crate) enum Error { #[cfg(feature = "sqlite")] - #[error("There was a problem with the connection to the sqlite database: {source}")] - SqliteError { + #[error( + "There was a problem with the connection to the sqlite database: \ + {source}" + )] + Sqlite { #[from] source: rusqlite::Error, }, - #[cfg(feature = "persy")] - #[error("There was a problem with the connection to the persy database.")] - PersyError { source: PersyError }, - #[cfg(feature = "heed")] - #[error("There was a problem with the connection to the heed database: {error}")] - HeedError { error: String }, #[cfg(feature = "rocksdb")] - #[error("There was a problem with the connection to the rocksdb database: {source}")] - RocksDbError { + #[error( + "There was a problem with the connection to the rocksdb database: \ + {source}" + )] + RocksDb { #[from] source: rocksdb::Error, }, #[error("Could not generate an image.")] - ImageError { + Image { #[from] source: image::error::ImageError, }, #[error("Could not connect to server: {source}")] - ReqwestError { + Reqwest { #[from] source: reqwest::Error, }, #[error("Could build regular expression: {source}")] - RegexError { + Regex { #[from] source: regex::Error, }, #[error("{0}")] - FederationError(OwnedServerName, RumaError), + Federation(OwnedServerName, RumaError), #[error("Could not do this io: {source}")] - IoError { + Io { #[from] source: std::io::Error, }, @@ -71,126 +63,143 @@ pub enum Error { #[error("{0}")] BadConfig(&'static str), #[error("{0}")] - /// Don't create this directly. Use Error::bad_database instead. + /// Don't create this directly. Use [`Error::bad_database`] instead. 
BadDatabase(&'static str), #[error("uiaa")] - Uiaa(UiaaInfo), - #[error("{0}: {1}")] + Uiaa(Box), + #[error("{}: {}", .0.errcode(), .1)] BadRequest(ErrorKind, &'static str), + // This is only needed for when a room alias already exists #[error("{0}")] - Conflict(&'static str), // This is only needed for when a room alias already exists - #[cfg(feature = "conduit_bin")] + Conflict(&'static str), #[error("{0}")] - ExtensionError(#[from] axum::extract::rejection::ExtensionRejection), - #[cfg(feature = "conduit_bin")] + Extension(#[from] axum::extract::rejection::ExtensionRejection), #[error("{0}")] - PathError(#[from] axum::extract::rejection::PathRejection), + Path(#[from] axum::extract::rejection::PathRejection), #[error("{0}")] AdminCommand(&'static str), #[error("from {0}: {1}")] - RedactionError(OwnedServerName, ruma::canonical_json::RedactionError), + Redaction(OwnedServerName, ruma::canonical_json::RedactionError), + #[error("unsupported room version {0}")] + UnsupportedRoomVersion(ruma::RoomVersionId), #[error("{0} in {1}")] InconsistentRoomState(&'static str, ruma::OwnedRoomId), } impl Error { - pub fn bad_database(message: &'static str) -> Self { - error!("BadDatabase: {}", message); + pub(crate) fn bad_database(message: &'static str) -> Self { + error!(message, "Bad database"); Self::BadDatabase(message) } - pub fn bad_config(message: &'static str) -> Self { - error!("BadConfig: {}", message); + pub(crate) fn bad_config(message: &'static str) -> Self { + error!(message, "Bad config"); Self::BadConfig(message) } -} -impl Error { - pub fn to_response(&self) -> RumaResponse { + pub(crate) fn to_response(&self) -> Ra { + use ErrorKind::{ + Forbidden, GuestAccessForbidden, LimitExceeded, MissingToken, + NotFound, NotYetUploaded, ThreepidAuthFailed, ThreepidDenied, + TooLarge, Unauthorized, Unknown, UnknownToken, Unrecognized, + UserDeactivated, WrongRoomKeysVersion, + }; + if let Self::Uiaa(uiaainfo) = self { - return 
RumaResponse(UiaaResponse::AuthResponse(uiaainfo.clone())); + return Ra(UiaaResponse::AuthResponse(*uiaainfo.clone())); } - if let Self::FederationError(origin, error) = self { + if let Self::Federation(origin, error) = self { let mut error = error.clone(); error.body = ErrorBody::Standard { kind: Unknown, message: format!("Answer from {origin}: {error}"), }; - return RumaResponse(UiaaResponse::MatrixError(error)); + return Ra(UiaaResponse::MatrixError(error)); } let message = format!("{self}"); - use ErrorKind::*; let (kind, status_code) = match self { Self::BadRequest(kind, _) => ( kind.clone(), match kind { - WrongRoomKeysVersion { .. } - | Forbidden + WrongRoomKeysVersion { + .. + } + | Forbidden { + .. + } | GuestAccessForbidden | ThreepidAuthFailed + | UserDeactivated | ThreepidDenied => StatusCode::FORBIDDEN, - Unauthorized | UnknownToken { .. } | MissingToken => StatusCode::UNAUTHORIZED, + Unauthorized + | UnknownToken { + .. + } + | MissingToken => StatusCode::UNAUTHORIZED, NotFound | Unrecognized => StatusCode::NOT_FOUND, - LimitExceeded { .. } => StatusCode::TOO_MANY_REQUESTS, - UserDeactivated => StatusCode::FORBIDDEN, + LimitExceeded { + .. + } => StatusCode::TOO_MANY_REQUESTS, TooLarge => StatusCode::PAYLOAD_TOO_LARGE, + NotYetUploaded => StatusCode::GATEWAY_TIMEOUT, _ => StatusCode::BAD_REQUEST, }, ), + Self::UnsupportedRoomVersion(_) => ( + ErrorKind::UnsupportedRoomVersion, + StatusCode::INTERNAL_SERVER_ERROR, + ), Self::Conflict(_) => (Unknown, StatusCode::CONFLICT), _ => (Unknown, StatusCode::INTERNAL_SERVER_ERROR), }; - info!("Returning an error: {}: {}", status_code, message); + warn!(%status_code, error = %message, "Responding with an error"); - RumaResponse(UiaaResponse::MatrixError(RumaError { - body: ErrorBody::Standard { kind, message }, + Ra(UiaaResponse::MatrixError(RumaError { + body: ErrorBody::Standard { + kind, + message, + }, status_code, })) } /// Sanitizes public-facing errors that can leak sensitive information. 
- pub fn sanitized_error(&self) -> String { + pub(crate) fn sanitized_error(&self) -> String { let db_error = String::from("Database or I/O error occurred."); match self { - #[cfg(feature = "sled")] - Self::SledError { .. } => db_error, #[cfg(feature = "sqlite")] - Self::SqliteError { .. } => db_error, - #[cfg(feature = "persy")] - Self::PersyError { .. } => db_error, - #[cfg(feature = "heed")] - Self::HeedError => db_error, + Self::Sqlite { + .. + } => db_error, #[cfg(feature = "rocksdb")] - Self::RocksDbError { .. } => db_error, - Self::IoError { .. } => db_error, - Self::BadConfig { .. } => db_error, - Self::BadDatabase { .. } => db_error, + Self::RocksDb { + .. + } => db_error, + Self::Io { + .. + } => db_error, + Self::BadConfig { + .. + } => db_error, + Self::BadDatabase { + .. + } => db_error, _ => self.to_string(), } } } -#[cfg(feature = "persy")] -impl> From> for Error { - fn from(err: persy::PE) -> Self { - Error::PersyError { - source: err.error().into(), - } - } -} - impl From for Error { fn from(i: Infallible) -> Self { match i {} } } -#[cfg(feature = "conduit_bin")] impl axum::response::IntoResponse for Error { fn into_response(self) -> axum::response::Response { self.to_response().into_response() diff --git a/src/utils/mod.rs b/src/utils/mod.rs deleted file mode 100644 index 0b5b1ae4..00000000 --- a/src/utils/mod.rs +++ /dev/null @@ -1,182 +0,0 @@ -pub mod error; - -use argon2::{Config, Variant}; -use cmp::Ordering; -use rand::prelude::*; -use ring::digest; -use ruma::{canonical_json::try_from_json_map, CanonicalJsonError, CanonicalJsonObject}; -use std::{ - cmp, fmt, - str::FromStr, - time::{SystemTime, UNIX_EPOCH}, -}; - -pub fn millis_since_unix_epoch() -> u64 { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .expect("time is valid") - .as_millis() as u64 -} - -pub fn increment(old: Option<&[u8]>) -> Option> { - let number = match old.map(|bytes| bytes.try_into()) { - Some(Ok(bytes)) => { - let number = u64::from_be_bytes(bytes); - number + 1 
- } - _ => 1, // Start at one. since 0 should return the first event in the db - }; - - Some(number.to_be_bytes().to_vec()) -} - -pub fn generate_keypair() -> Vec { - let mut value = random_string(8).as_bytes().to_vec(); - value.push(0xff); - value.extend_from_slice( - &ruma::signatures::Ed25519KeyPair::generate() - .expect("Ed25519KeyPair generation always works (?)"), - ); - value -} - -/// Parses the bytes into an u64. -pub fn u64_from_bytes(bytes: &[u8]) -> Result { - let array: [u8; 8] = bytes.try_into()?; - Ok(u64::from_be_bytes(array)) -} - -/// Parses the bytes into a string. -pub fn string_from_bytes(bytes: &[u8]) -> Result { - String::from_utf8(bytes.to_vec()) -} - -pub fn random_string(length: usize) -> String { - thread_rng() - .sample_iter(&rand::distributions::Alphanumeric) - .take(length) - .map(char::from) - .collect() -} - -/// Calculate a new hash for the given password -pub fn calculate_password_hash(password: &str) -> Result { - let hashing_config = Config { - variant: Variant::Argon2id, - ..Default::default() - }; - - let salt = random_string(32); - argon2::hash_encoded(password.as_bytes(), salt.as_bytes(), &hashing_config) -} - -#[tracing::instrument(skip(keys))] -pub fn calculate_hash(keys: &[&[u8]]) -> Vec { - // We only hash the pdu's event ids, not the whole pdu - let bytes = keys.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().to_owned() -} - -pub fn common_elements( - mut iterators: impl Iterator>>, - check_order: impl Fn(&[u8], &[u8]) -> Ordering, -) -> Option>> { - let first_iterator = iterators.next()?; - let mut other_iterators = iterators.map(|i| i.peekable()).collect::>(); - - Some(first_iterator.filter(move |target| { - other_iterators.iter_mut().all(|it| { - while let Some(element) = it.peek() { - match check_order(element, target) { - Ordering::Greater => return false, // We went too far - Ordering::Equal => return true, // Element is in both iters - Ordering::Less => { - // Keep searching - 
it.next(); - } - } - } - false - }) - })) -} - -/// Fallible conversion from any value that implements `Serialize` to a `CanonicalJsonObject`. -/// -/// `value` must serialize to an `serde_json::Value::Object`. -pub fn to_canonical_object( - value: T, -) -> Result { - use serde::ser::Error; - - match serde_json::to_value(value).map_err(CanonicalJsonError::SerDe)? { - serde_json::Value::Object(map) => try_from_json_map(map), - _ => Err(CanonicalJsonError::SerDe(serde_json::Error::custom( - "Value must be an object", - ))), - } -} - -pub fn deserialize_from_str< - 'de, - D: serde::de::Deserializer<'de>, - T: FromStr, - E: std::fmt::Display, ->( - deserializer: D, -) -> Result { - struct Visitor, E>(std::marker::PhantomData); - impl<'de, T: FromStr, Err: std::fmt::Display> serde::de::Visitor<'de> - for Visitor - { - type Value = T; - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "a parsable string") - } - fn visit_str(self, v: &str) -> Result - where - E: serde::de::Error, - { - v.parse().map_err(serde::de::Error::custom) - } - } - deserializer.deserialize_str(Visitor(std::marker::PhantomData)) -} - -// Copied from librustdoc: -// https://github.com/rust-lang/rust/blob/cbaeec14f90b59a91a6b0f17fc046c66fa811892/src/librustdoc/html/escape.rs - -/// Wrapper struct which will emit the HTML-escaped version of the contained -/// string when passed to a format string. 
-pub struct HtmlEscape<'a>(pub &'a str); - -impl<'a> fmt::Display for HtmlEscape<'a> { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - // Because the internet is always right, turns out there's not that many - // characters to escape: http://stackoverflow.com/questions/7381974 - let HtmlEscape(s) = *self; - let pile_o_bits = s; - let mut last = 0; - for (i, ch) in s.char_indices() { - let s = match ch { - '>' => ">", - '<' => "<", - '&' => "&", - '\'' => "'", - '"' => """, - _ => continue, - }; - fmt.write_str(&pile_o_bits[last..i])?; - fmt.write_str(s)?; - // NOTE: we only expect single byte characters here - which is fine as long as we - // only match single byte characters - last = i + 1; - } - - if last < s.len() { - fmt.write_str(&pile_o_bits[last..])?; - } - Ok(()) - } -} diff --git a/src/utils/on_demand_hashmap.rs b/src/utils/on_demand_hashmap.rs new file mode 100644 index 00000000..9f74e3d6 --- /dev/null +++ b/src/utils/on_demand_hashmap.rs @@ -0,0 +1,304 @@ +use std::{ + collections::HashMap, + fmt, + hash::Hash, + marker::PhantomData, + ops::Deref, + sync::{Arc, Weak}, +}; + +use tokio::sync::{mpsc, Mutex, OwnedMutexGuard, RwLock}; +use tracing::{trace, warn, Level}; + +use crate::observability::METRICS; + +/// Data shared between [`OnDemandHashMap`] and the cleanup task +/// +/// Importantly it does not contain the `cleanup_sender`, since it getting +/// dropped signals the cleanup task to exit. If the cleanup task had an owned +/// reference to it, the only way for it to exit would be for every [`Entry`] to +/// be dropped, we don't want to rely on that. 
+struct SharedData { + name: Arc, + /// Values are owned by their [entries][Entry] + entries: RwLock>>, +} + +impl SharedData +where + K: Hash + Eq + Clone + fmt::Debug, +{ + #[tracing::instrument( + level = Level::TRACE, + skip(self), + fields(name = self.name.as_ref()), + )] + async fn try_cleanup_entry(&self, key: K) { + let mut map = self.entries.write().await; + + let Some(weak) = map.get(&key) else { + trace!("Entry has already been cleaned up"); + return; + }; + + if weak.strong_count() != 0 { + trace!("Entry is in use"); + return; + } + + trace!("Cleaning up unused entry"); + map.remove(&key); + METRICS.record_on_demand_hashmap_size(self.name.clone(), map.len()); + } + + #[tracing::instrument(level = Level::TRACE, skip(map))] + fn try_get_live_value( + pass: usize, + map: &HashMap>, + key: &K, + ) -> Option> { + if let Some(value) = map.get(key) { + if let Some(value) = value.upgrade() { + trace!(pass, "Using existing value"); + return Some(value); + } + + trace!( + pass, + "Existing value is stale and needs cleanup, creating new" + ); + } else { + trace!(pass, "No existing value, creating new"); + } + + None + } + + /// Either returns an existing live value, or creates a new one and inserts + /// it into the map. 
+ #[tracing::instrument(level = Level::TRACE, skip(self, create))] + async fn get_or_insert_with(&self, key: &K, create: F) -> Arc + where + F: FnOnce() -> V, + { + { + // first, take a read lock and try to get an existing value + + // TODO check if this fast path actually makes it faster, possibly + // make it configurable per OnDemandHashMap depending on contention + // and how expensive create() is + let map = self.entries.read().await; + if let Some(v) = Self::try_get_live_value(1, &map, key) { + return v; + } + } + + // no entry or it has died, create a new one + let value = Arc::new(create()); + let weak = Arc::downgrade(&value); + + // take a write lock, try again, otherwise insert our new value + let mut map = self.entries.write().await; + if let Some(v) = Self::try_get_live_value(2, &map, key) { + // another entry showed up while we had let go of the lock, + // use that + drop(value); + drop(weak); + return v; + } + + map.insert(key.clone(), weak); + METRICS.record_on_demand_hashmap_size(self.name.clone(), map.len()); + + value + } +} + +/// A [`HashMap`] whose entries are automatically removed once they are no +/// longer referenced. +pub(crate) struct OnDemandHashMap { + /// The data shared between the [`OnDemandHashMap`] and the cleanup task. + shared: Arc>, + /// This is the only non-[weak][mpsc::WeakUnboundedSender] `Sender`, which + /// means that dropping the `OnDemandHashMap` causes the cleanup + /// process to exit. + cleanup_sender: mpsc::UnboundedSender, +} + +impl OnDemandHashMap +where + K: Hash + Eq + Clone + fmt::Debug + Send + Sync + 'static, + V: Send + Sync + 'static, +{ + /// Creates a new `OnDemandHashMap`. The `name` is used for metrics and + /// should be unique to this instance. 
+ pub(crate) fn new(name: String) -> Self { + let (cleanup_sender, mut receiver) = mpsc::unbounded_channel(); + + let shared = Arc::new(SharedData { + name: name.into(), + entries: RwLock::new(HashMap::new()), + }); + + { + let shared = Arc::clone(&shared); + tokio::task::spawn(async move { + loop { + let Some(key) = receiver.recv().await else { + trace!( + name = shared.name.as_ref(), + "Channel has died, exiting cleanup task" + ); + return; + }; + + shared.try_cleanup_entry(key).await; + } + }); + } + + Self { + shared, + cleanup_sender, + } + } + + #[tracing::instrument(level = Level::TRACE, skip(self, create))] + pub(crate) async fn get_or_insert_with( + &self, + key: K, + create: F, + ) -> Entry + where + F: FnOnce() -> V, + { + let value = self.shared.get_or_insert_with(&key, create).await; + + Entry { + drop_guard: EntryDropGuard { + cleanup_sender: self.cleanup_sender.downgrade(), + key: Some(key), + }, + value, + } + } +} + +struct EntryDropGuard { + cleanup_sender: mpsc::WeakUnboundedSender, + /// Only `None` during `drop()` + key: Option, +} + +impl Drop for EntryDropGuard { + fn drop(&mut self) { + let Some(cleanup_sender) = self.cleanup_sender.upgrade() else { + trace!("Backing map has already been dropped"); + return; + }; + + if let Err(error) = cleanup_sender + .send(self.key.take().expect("drop should only be called once")) + { + warn!(%error, "Failed to send cleanup message"); + } + } +} + +/// A wrapper around a key/value pair inside an [`OnDemandHashMap`] +/// +/// If every `Entry` for a specific key is dropped, the value is removed from +/// the map. +pub(crate) struct Entry { + drop_guard: EntryDropGuard, + value: Arc, +} + +impl Deref for Entry { + type Target = V; + + fn deref(&self) -> &Self::Target { + self.value.as_ref() + } +} + +/// Internal zero-sized type used to swallow the [`TokenSet`]'s marker type +struct TokenMarker(PhantomData T>); + +/// A collection of dynamically-created locks, one for each value of `K`. 
+/// +/// A given key can be locked using [`TokenSet::lock_key()`], which will either +/// return an ownership token immediately if the key is not currently locked, or +/// wait until the previous lock has been released. +/// +/// The marker type `M` can be used to disambiguate different `TokenSet` +/// instances to avoid misuse of tokens. +pub(crate) struct TokenSet { + inner: OnDemandHashMap>>, +} + +impl TokenSet +where + K: Hash + Eq + Clone + fmt::Debug + Send + Sync + 'static, + M: 'static, +{ + /// Creates a new `TokenSet`. The `name` is used for metrics and should be + /// unique to this instance. + pub(crate) fn new(name: String) -> Self { + Self { + inner: OnDemandHashMap::new(name), + } + } + + /// Locks this key in the `TokenSet`, returning a token proving + /// unique access. + #[tracing::instrument(level = Level::TRACE, skip(self))] + pub(crate) async fn lock_key(&self, key: K) -> KeyToken { + let Entry { + drop_guard, + value, + } = self + .inner + .get_or_insert_with(key, || Mutex::new(TokenMarker(PhantomData))) + .await; + + KeyToken { + drop_guard, + _mutex_guard: value.lock_owned().await, + } + } +} + +/// Unique token for a given key in a [`TokenSet`]. +/// +/// Ownership of this token proves that no other [`KeyToken`] for this key in +/// this [`TokenSet`] currently exists. +/// +/// Access to the underlying key is provided by a [`Deref`] impl. 
+pub(crate) struct KeyToken { + drop_guard: EntryDropGuard, + _mutex_guard: OwnedMutexGuard>, +} + +impl Deref for KeyToken { + type Target = K; + + fn deref(&self) -> &Self::Target { + self.drop_guard + .key + .as_ref() + .expect("key should only be None during Drop") + } +} + +impl fmt::Debug for KeyToken { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", &**self) + } +} + +impl fmt::Display for KeyToken { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", &**self) + } +} diff --git a/src/utils/proxy_protocol.rs b/src/utils/proxy_protocol.rs new file mode 100644 index 00000000..53021755 --- /dev/null +++ b/src/utils/proxy_protocol.rs @@ -0,0 +1,161 @@ +use std::{ + future::Future, io::ErrorKind, pin::Pin, task::Poll, time::Duration, +}; + +use axum::{middleware::AddExtension, Extension}; +use axum_server::accept::Accept; +use pin_project_lite::pin_project; +use proxy_header::{io::ProxiedStream, ParseConfig, ProxyHeader}; +use tokio::{ + io::AsyncRead, + time::{timeout, Timeout}, +}; +use tower::Layer; +use tracing::warn; + +#[derive(Debug, Clone)] +pub(crate) struct ProxyAcceptorConfig { + pub(crate) header_timeout: Duration, + pub(crate) parse_config: ParseConfig, +} + +impl Default for ProxyAcceptorConfig { + fn default() -> Self { + Self { + header_timeout: Duration::from_secs(5), + parse_config: ParseConfig::default(), + } + } +} + +#[derive(Debug, Clone)] +pub(crate) struct ProxyAcceptor
{ + inner: A, + config: ProxyAcceptorConfig, +} + +impl ProxyAcceptor { + pub(crate) fn new(inner: A, config: ProxyAcceptorConfig) -> Self { + Self { + inner, + config, + } + } +} + +impl Accept for ProxyAcceptor +where + A: Accept, + A::Stream: AsyncRead + Unpin + Send + 'static, +{ + type Future = AcceptorFuture; + type Service = AddExtension>; + type Stream = ProxiedStream; + + fn accept(&self, stream: I, service: S) -> Self::Future { + let inner_future = self.inner.accept(stream, service); + let config = self.config.clone(); + + AcceptorFuture::new(inner_future, config) + } +} + +/// Future returned by [`ProxiedStream::create_from_tokio()`]. +type StreamFuture = + Pin>> + Send>>; + +pin_project! { + #[project = AcceptorFutureProj] + pub(crate) enum AcceptorFuture { + Inner { + #[pin] + inner_future: F, + config: ProxyAcceptorConfig, + }, + Proxy { + #[pin] + stream_future: Timeout>, + service: Option, + }, + } +} + +impl AcceptorFuture { + fn new(inner_future: F, config: ProxyAcceptorConfig) -> Self { + Self::Inner { + inner_future, + config, + } + } +} + +impl Future for AcceptorFuture +where + F: Future>, + I: AsyncRead + Unpin + Send + 'static, +{ + type Output = std::io::Result<( + ProxiedStream, + AddExtension>, + )>; + + fn poll( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> Poll { + loop { + match self.as_mut().project() { + AcceptorFutureProj::Inner { + inner_future, + config, + } => { + let Poll::Ready((stream, service)) = + inner_future.poll(cx)? 
+ else { + return Poll::Pending; + }; + + let stream_future: StreamFuture = + Box::pin(ProxiedStream::create_from_tokio( + stream, + config.parse_config, + )); + let stream_future = + timeout(config.header_timeout, stream_future); + + self.set(AcceptorFuture::Proxy { + stream_future, + service: Some(service), + }); + } + AcceptorFutureProj::Proxy { + stream_future, + service, + } => { + let Poll::Ready(ret) = stream_future.poll(cx) else { + return Poll::Pending; + }; + + let stream = ret + .map_err(|e| { + warn!( + "Timed out waiting for HAProxy protocol header" + ); + std::io::Error::new(ErrorKind::TimedOut, e) + })? + .inspect_err(|error| { + warn!(%error, "Failed to parse HAProxy protocol header"); + })?; + + let service = + Extension(stream.proxy_header().clone().into_owned()) + .layer(service.take().expect( + "future should not be polled after ready", + )); + + return Poll::Ready(Ok((stream, service))); + } + } + } + } +} diff --git a/src/utils/room_version.rs b/src/utils/room_version.rs new file mode 100644 index 00000000..5e84da65 --- /dev/null +++ b/src/utils/room_version.rs @@ -0,0 +1,87 @@ +use ruma::RoomVersionId; + +use crate::Error; + +/// Properties that we care about in grapevine that differ between supported +/// room versions. +/// +/// This is similar to [`ruma::room_version_rules::RoomVersionRules`], except +/// that it only has the properties that are relevant to us instead of all +/// properties relevant to ruma state resolution. +/// +/// When branching for different room versions, prefer constructing a +/// `RoomVersion` and branching on its properties over branching based on the +/// [`RoomVersionId`] directly. If there is a relevant property that is not +/// already included in `RoomVersion`, add it. In particular, comparisons like +/// `room_version_id < RoomVersionId::V11` do not work, because room versions +/// do not have a defined order. 
Ruma implements `PartialOrd` on `RoomVersionId` +/// as a lexicographic string comparison, which is almost certainly not what you +/// want. +pub(crate) struct RoomVersion { + /// Whether `m.room.create` state events have a `creator` property. + /// + /// The `creator` property is always equivalent to the event `sender`, and + /// was [removed][spec] in v11. + /// + /// [spec]: https://spec.matrix.org/v1.11/rooms/v11/#remove-the-creator-property-of-mroomcreate-events + pub(crate) create_event_creator_prop: bool, + + /// Whether the `redacts` property of `m.room.redaction` events is a + /// property in the event `content` or a top-level property of the event. + /// + /// This property was [moved][spec] from top-level to `content` in v11. + /// + /// [spec]: https://spec.matrix.org/v1.11/rooms/v11/#moving-the-redacts-property-of-mroomredaction-events-to-a-content-property + pub(crate) redaction_event_redacts_in_content: bool, +} + +// Allowed so that we can use struct-update syntax for incremental changes +// between versions even when all fields change. +#[allow(clippy::needless_update)] +mod known_versions { + use super::RoomVersion; + + pub(super) const V6: RoomVersion = RoomVersion { + create_event_creator_prop: true, + redaction_event_redacts_in_content: false, + }; + + pub(super) const V7: RoomVersion = RoomVersion { + ..V6 + }; + + pub(super) const V8: RoomVersion = RoomVersion { + ..V7 + }; + + pub(super) const V9: RoomVersion = RoomVersion { + ..V8 + }; + + pub(super) const V10: RoomVersion = RoomVersion { + ..V9 + }; + + pub(super) const V11: RoomVersion = RoomVersion { + create_event_creator_prop: false, + redaction_event_redacts_in_content: true, + ..V10 + }; +} + +/// May return an error for unsupported room versions. 
+impl TryFrom<&RoomVersionId> for RoomVersion { + type Error = Error; + + fn try_from(version: &RoomVersionId) -> Result { + match version { + RoomVersionId::V6 => Ok(known_versions::V6), + RoomVersionId::V7 => Ok(known_versions::V7), + RoomVersionId::V8 => Ok(known_versions::V8), + RoomVersionId::V9 => Ok(known_versions::V9), + RoomVersionId::V10 => Ok(known_versions::V10), + RoomVersionId::V11 => Ok(known_versions::V11), + _ => Err(Error::UnsupportedRoomVersion(version.clone())), + } + } +} diff --git a/tests/integrations/check_config.rs b/tests/integrations/check_config.rs new file mode 100644 index 00000000..119acd03 --- /dev/null +++ b/tests/integrations/check_config.rs @@ -0,0 +1,119 @@ +use std::{ + path::{Path, PathBuf}, + process::Output, +}; + +use assert_cmd::Command; + +type TestError = Box; +type TestResult = Result<(), TestError>; + +fn fixture_path

(name: P) -> PathBuf +where + P: AsRef, +{ + PathBuf::from("tests/integrations/fixtures/check_config").join(name) +} + +fn run

(file: P) -> Result +where + P: AsRef, +{ + Command::cargo_bin("grapevine")? + .args(["check-config", "--log-format=json", "-c"]) + .arg(fixture_path(file)) + .output() + .map_err(Into::into) +} + +macro_rules! make_snapshot_test { + ($name:ident, $description:expr, $fixture_name:expr $(,)?) => { + #[test] + fn $name() -> TestResult { + let output = run($fixture_name)?; + + let stdout = String::from_utf8(output.stdout)?; + let stderr = String::from_utf8(output.stderr)?; + let status_code = output.status.code(); + + insta::with_settings!({ + description => $description, + omit_expression => true, + snapshot_suffix => "stdout", + }, { + insta::assert_snapshot!(stdout); + }); + + let stderr_parse = serde_json::Deserializer::from_str(&stderr) + .into_iter::() + .collect::, _>>(); + insta::with_settings!({ + description => $description, + omit_expression => true, + snapshot_suffix => "stderr", + }, { + if let Ok(stderr_json) = stderr_parse { + insta::assert_json_snapshot!(stderr_json, { + ".*.timestamp" => "[timestamp]" + }); + } else { + insta::assert_snapshot!(stderr); + } + }); + + insta::with_settings!({ + description => $description, + omit_expression => true, + snapshot_suffix => "status_code", + }, { + insta::assert_debug_snapshot!(status_code); + }); + + Ok(()) + } + }; +} + +make_snapshot_test!(valid_config, "A normal config is valid", "valid.toml"); + +make_snapshot_test!( + minimal_valid_config, + "A configuration containing only the required keys is valid", + "minimal-valid.toml", +); + +make_snapshot_test!( + invalid_keys, + "A config with invalid keys fails", + "invalid-keys.toml", +); + +make_snapshot_test!( + invalid_values, + "A config with invalid values fails", + "invalid-values.toml", +); + +make_snapshot_test!( + overlapping_paths_equal, + "A config with equal paths fails", + "equal-paths.toml", +); + +make_snapshot_test!( + overlapping_paths_media, + "A config with the media path inside the database path fails", + "media-in-database.toml", +); + 
+make_snapshot_test!( + overlapping_paths_database, + "A config with the database path inside the media path fails", + "database-in-media.toml", +); + +make_snapshot_test!( + unix_socket, + "A config listening to a Unix socket is valid", + "unix-socket.toml", +); diff --git a/tests/integrations/fixtures/check_config/database-in-media.toml b/tests/integrations/fixtures/check_config/database-in-media.toml new file mode 100644 index 00000000..db50a5b8 --- /dev/null +++ b/tests/integrations/fixtures/check_config/database-in-media.toml @@ -0,0 +1,12 @@ +server_name = "example.com" + +[server_discovery] +client.base_url = "https://matrix.example.com" + +[database] +backend = "rocksdb" +path = "tests/integrations/fixtures/check_config/dirs/c/a" + +[media.backend] +type = "filesystem" +path = "tests/integrations/fixtures/check_config/dirs/c" diff --git a/tests/integrations/fixtures/check_config/dirs/a/.gitignore b/tests/integrations/fixtures/check_config/dirs/a/.gitignore new file mode 100644 index 00000000..d6b7ef32 --- /dev/null +++ b/tests/integrations/fixtures/check_config/dirs/a/.gitignore @@ -0,0 +1,2 @@ +* +!.gitignore diff --git a/tests/integrations/fixtures/check_config/dirs/b/.gitignore b/tests/integrations/fixtures/check_config/dirs/b/.gitignore new file mode 100644 index 00000000..d6b7ef32 --- /dev/null +++ b/tests/integrations/fixtures/check_config/dirs/b/.gitignore @@ -0,0 +1,2 @@ +* +!.gitignore diff --git a/tests/integrations/fixtures/check_config/dirs/c/a/.gitignore b/tests/integrations/fixtures/check_config/dirs/c/a/.gitignore new file mode 100644 index 00000000..d6b7ef32 --- /dev/null +++ b/tests/integrations/fixtures/check_config/dirs/c/a/.gitignore @@ -0,0 +1,2 @@ +* +!.gitignore diff --git a/tests/integrations/fixtures/check_config/equal-paths.toml b/tests/integrations/fixtures/check_config/equal-paths.toml new file mode 100644 index 00000000..1271b2d3 --- /dev/null +++ b/tests/integrations/fixtures/check_config/equal-paths.toml @@ -0,0 +1,12 @@ 
+server_name = "example.com" + +[server_discovery] +client.base_url = "https://matrix.example.com" + +[database] +backend = "rocksdb" +path = "tests/integrations/fixtures/check_config/dirs/a" + +[media.backend] +type = "filesystem" +path = "tests/integrations/fixtures/check_config/dirs/a" diff --git a/tests/integrations/fixtures/check_config/invalid-keys.toml b/tests/integrations/fixtures/check_config/invalid-keys.toml new file mode 100644 index 00000000..4f9ad938 --- /dev/null +++ b/tests/integrations/fixtures/check_config/invalid-keys.toml @@ -0,0 +1,2 @@ +some_name = "example.com" +prort = 6167 diff --git a/tests/integrations/fixtures/check_config/invalid-values.toml b/tests/integrations/fixtures/check_config/invalid-values.toml new file mode 100644 index 00000000..54f9ff6c --- /dev/null +++ b/tests/integrations/fixtures/check_config/invalid-values.toml @@ -0,0 +1,2 @@ +server_name = 6667 +port = "ircd" diff --git a/tests/integrations/fixtures/check_config/media-in-database.toml b/tests/integrations/fixtures/check_config/media-in-database.toml new file mode 100644 index 00000000..66e94257 --- /dev/null +++ b/tests/integrations/fixtures/check_config/media-in-database.toml @@ -0,0 +1,12 @@ +server_name = "example.com" + +[server_discovery] +client.base_url = "https://matrix.example.com" + +[database] +backend = "rocksdb" +path = "tests/integrations/fixtures/check_config/dirs/c" + +[media.backend] +type = "filesystem" +path = "tests/integrations/fixtures/check_config/dirs/c/a" diff --git a/tests/integrations/fixtures/check_config/minimal-valid.toml b/tests/integrations/fixtures/check_config/minimal-valid.toml new file mode 100644 index 00000000..330e0d57 --- /dev/null +++ b/tests/integrations/fixtures/check_config/minimal-valid.toml @@ -0,0 +1,12 @@ +server_name = "example.com" + +[server_discovery] +client.base_url = "https://matrix.example.com" + +[database] +backend = "rocksdb" +path = "tests/integrations/fixtures/check_config/dirs/a" + +[media.backend] +type = 
"filesystem" +path = "tests/integrations/fixtures/check_config/dirs/b" diff --git a/tests/integrations/fixtures/check_config/unix-socket.toml b/tests/integrations/fixtures/check_config/unix-socket.toml new file mode 100644 index 00000000..fe088d5d --- /dev/null +++ b/tests/integrations/fixtures/check_config/unix-socket.toml @@ -0,0 +1,14 @@ +server_name = "example.com" +listen = [{ type = "unix", path = "/tmp/grapevine.sock" }] + +[server_discovery] +client.base_url = "https://matrix.example.com" + +[database] +backend = "rocksdb" +path = "tests/integrations/fixtures/check_config/dirs/a" + +[media.backend] +type = "filesystem" +path = "tests/integrations/fixtures/check_config/dirs/b" + diff --git a/tests/integrations/fixtures/check_config/valid.toml b/tests/integrations/fixtures/check_config/valid.toml new file mode 100644 index 00000000..cb0fda11 --- /dev/null +++ b/tests/integrations/fixtures/check_config/valid.toml @@ -0,0 +1,25 @@ +server_name = "example.com" + +allow_registration = false + +max_request_size = 20_000_000 + +[server_discovery] +server.authority = "matrix.example.com:443" +client.base_url = "https://matrix.example.com" + +[database] +backend = "rocksdb" +path = "tests/integrations/fixtures/check_config/dirs/a" + +[media.backend] +type = "filesystem" +path = "tests/integrations/fixtures/check_config/dirs/b" + +[federation] +enable = true +trusted_servers = ["matrix.org"] + +[[listen]] + type="tcp" + address = "0.0.0.0" diff --git a/tests/integrations/main.rs b/tests/integrations/main.rs new file mode 100755 index 00000000..908b0bac --- /dev/null +++ b/tests/integrations/main.rs @@ -0,0 +1,4 @@ +// +#![allow(clippy::tests_outside_test_module)] + +mod check_config; diff --git a/tests/integrations/snapshots/integrations__check_config__invalid_keys@status_code.snap b/tests/integrations/snapshots/integrations__check_config__invalid_keys@status_code.snap new file mode 100644 index 00000000..2278288a --- /dev/null +++ 
b/tests/integrations/snapshots/integrations__check_config__invalid_keys@status_code.snap @@ -0,0 +1,8 @@ +--- +source: tests/integrations/check_config.rs +description: A config with invalid keys fails +snapshot_kind: text +--- +Some( + 1, +) diff --git a/tests/integrations/snapshots/integrations__check_config__invalid_keys@stderr.snap b/tests/integrations/snapshots/integrations__check_config__invalid_keys@stderr.snap new file mode 100644 index 00000000..83cd650b --- /dev/null +++ b/tests/integrations/snapshots/integrations__check_config__invalid_keys@stderr.snap @@ -0,0 +1,11 @@ +--- +source: tests/integrations/check_config.rs +description: A config with invalid keys fails +--- +Error: failed to validate configuration + Caused by: failed to parse configuration file "tests/integrations/fixtures/check_config/invalid-keys.toml" + Caused by: TOML parse error at line 1, column 1 + | +1 | some_name = "example.com" + | ^^^^^^^^^ +unknown field `some_name`, expected one of `conduit_compat`, `listen`, `tls`, `server_name`, `server_discovery`, `database`, `media`, `federation`, `cache`, `cleanup_second_interval`, `max_request_size`, `allow_registration`, `registration_token`, `allow_encryption`, `allow_room_creation`, `default_room_version`, `proxy`, `jwt_secret`, `observability`, `turn`, `emergency_password` diff --git a/tests/integrations/snapshots/integrations__check_config__invalid_keys@stdout.snap b/tests/integrations/snapshots/integrations__check_config__invalid_keys@stdout.snap new file mode 100644 index 00000000..b17d0451 --- /dev/null +++ b/tests/integrations/snapshots/integrations__check_config__invalid_keys@stdout.snap @@ -0,0 +1,6 @@ +--- +source: tests/integrations/check_config.rs +description: A config with invalid keys fails +snapshot_kind: text +--- + diff --git a/tests/integrations/snapshots/integrations__check_config__invalid_values@status_code.snap b/tests/integrations/snapshots/integrations__check_config__invalid_values@status_code.snap new file mode 
100644 index 00000000..27ae9ec0 --- /dev/null +++ b/tests/integrations/snapshots/integrations__check_config__invalid_values@status_code.snap @@ -0,0 +1,8 @@ +--- +source: tests/integrations/check_config.rs +description: A config with invalid values fails +snapshot_kind: text +--- +Some( + 1, +) diff --git a/tests/integrations/snapshots/integrations__check_config__invalid_values@stderr.snap b/tests/integrations/snapshots/integrations__check_config__invalid_values@stderr.snap new file mode 100644 index 00000000..5e46f095 --- /dev/null +++ b/tests/integrations/snapshots/integrations__check_config__invalid_values@stderr.snap @@ -0,0 +1,12 @@ +--- +source: tests/integrations/check_config.rs +description: A config with invalid values fails +snapshot_kind: text +--- +Error: failed to validate configuration + Caused by: failed to parse configuration file "tests/integrations/fixtures/check_config/invalid-values.toml" + Caused by: TOML parse error at line 1, column 15 + | +1 | server_name = 6667 + | ^^^^ +invalid type: integer `6667`, expected a string diff --git a/tests/integrations/snapshots/integrations__check_config__invalid_values@stdout.snap b/tests/integrations/snapshots/integrations__check_config__invalid_values@stdout.snap new file mode 100644 index 00000000..a67fbf13 --- /dev/null +++ b/tests/integrations/snapshots/integrations__check_config__invalid_values@stdout.snap @@ -0,0 +1,6 @@ +--- +source: tests/integrations/check_config.rs +description: A config with invalid values fails +snapshot_kind: text +--- + diff --git a/tests/integrations/snapshots/integrations__check_config__minimal_valid_config@status_code.snap b/tests/integrations/snapshots/integrations__check_config__minimal_valid_config@status_code.snap new file mode 100644 index 00000000..c58f09d0 --- /dev/null +++ b/tests/integrations/snapshots/integrations__check_config__minimal_valid_config@status_code.snap @@ -0,0 +1,8 @@ +--- +source: tests/integrations/check_config.rs +description: A configuration 
containing only the required keys is valid +snapshot_kind: text +--- +Some( + 0, +) diff --git a/tests/integrations/snapshots/integrations__check_config__minimal_valid_config@stderr.snap b/tests/integrations/snapshots/integrations__check_config__minimal_valid_config@stderr.snap new file mode 100644 index 00000000..0a90a7d2 --- /dev/null +++ b/tests/integrations/snapshots/integrations__check_config__minimal_valid_config@stderr.snap @@ -0,0 +1,15 @@ +--- +source: tests/integrations/check_config.rs +description: A configuration containing only the required keys is valid +snapshot_kind: text +--- +[ + { + "fields": { + "message": "Configuration looks good" + }, + "level": "INFO", + "target": "grapevine::cli::check_config", + "timestamp": "[timestamp]" + } +] diff --git a/tests/integrations/snapshots/integrations__check_config__minimal_valid_config@stdout.snap b/tests/integrations/snapshots/integrations__check_config__minimal_valid_config@stdout.snap new file mode 100644 index 00000000..ca3c5651 --- /dev/null +++ b/tests/integrations/snapshots/integrations__check_config__minimal_valid_config@stdout.snap @@ -0,0 +1,6 @@ +--- +source: tests/integrations/check_config.rs +description: A configuration containing only the required keys is valid +snapshot_kind: text +--- + diff --git a/tests/integrations/snapshots/integrations__check_config__overlapping_paths_database@status_code.snap b/tests/integrations/snapshots/integrations__check_config__overlapping_paths_database@status_code.snap new file mode 100644 index 00000000..d2d33fe4 --- /dev/null +++ b/tests/integrations/snapshots/integrations__check_config__overlapping_paths_database@status_code.snap @@ -0,0 +1,7 @@ +--- +source: tests/integrations/check_config.rs +description: A config with the database path inside the media path fails +--- +Some( + 1, +) diff --git a/tests/integrations/snapshots/integrations__check_config__overlapping_paths_database@stderr.snap 
b/tests/integrations/snapshots/integrations__check_config__overlapping_paths_database@stderr.snap new file mode 100644 index 00000000..8b550414 --- /dev/null +++ b/tests/integrations/snapshots/integrations__check_config__overlapping_paths_database@stderr.snap @@ -0,0 +1,6 @@ +--- +source: tests/integrations/check_config.rs +description: A config with the database path inside the media path fails +--- +Error: failed to validate configuration + Caused by: database and media paths overlap diff --git a/tests/integrations/snapshots/integrations__check_config__overlapping_paths_database@stdout.snap b/tests/integrations/snapshots/integrations__check_config__overlapping_paths_database@stdout.snap new file mode 100644 index 00000000..065d47d1 --- /dev/null +++ b/tests/integrations/snapshots/integrations__check_config__overlapping_paths_database@stdout.snap @@ -0,0 +1,5 @@ +--- +source: tests/integrations/check_config.rs +description: A config with the database path inside the media path fails +--- + diff --git a/tests/integrations/snapshots/integrations__check_config__overlapping_paths_equal@status_code.snap b/tests/integrations/snapshots/integrations__check_config__overlapping_paths_equal@status_code.snap new file mode 100644 index 00000000..ac822b67 --- /dev/null +++ b/tests/integrations/snapshots/integrations__check_config__overlapping_paths_equal@status_code.snap @@ -0,0 +1,7 @@ +--- +source: tests/integrations/check_config.rs +description: A config with equal paths fails +--- +Some( + 1, +) diff --git a/tests/integrations/snapshots/integrations__check_config__overlapping_paths_equal@stderr.snap b/tests/integrations/snapshots/integrations__check_config__overlapping_paths_equal@stderr.snap new file mode 100644 index 00000000..c1e68376 --- /dev/null +++ b/tests/integrations/snapshots/integrations__check_config__overlapping_paths_equal@stderr.snap @@ -0,0 +1,6 @@ +--- +source: tests/integrations/check_config.rs +description: A config with equal paths fails +--- +Error: 
failed to validate configuration + Caused by: database and media paths overlap diff --git a/tests/integrations/snapshots/integrations__check_config__overlapping_paths_equal@stdout.snap b/tests/integrations/snapshots/integrations__check_config__overlapping_paths_equal@stdout.snap new file mode 100644 index 00000000..acbd1296 --- /dev/null +++ b/tests/integrations/snapshots/integrations__check_config__overlapping_paths_equal@stdout.snap @@ -0,0 +1,5 @@ +--- +source: tests/integrations/check_config.rs +description: A config with equal paths fails +--- + diff --git a/tests/integrations/snapshots/integrations__check_config__overlapping_paths_media@status_code.snap b/tests/integrations/snapshots/integrations__check_config__overlapping_paths_media@status_code.snap new file mode 100644 index 00000000..5240e180 --- /dev/null +++ b/tests/integrations/snapshots/integrations__check_config__overlapping_paths_media@status_code.snap @@ -0,0 +1,7 @@ +--- +source: tests/integrations/check_config.rs +description: A config with the media path inside the database path fails +--- +Some( + 1, +) diff --git a/tests/integrations/snapshots/integrations__check_config__overlapping_paths_media@stderr.snap b/tests/integrations/snapshots/integrations__check_config__overlapping_paths_media@stderr.snap new file mode 100644 index 00000000..2402b8b4 --- /dev/null +++ b/tests/integrations/snapshots/integrations__check_config__overlapping_paths_media@stderr.snap @@ -0,0 +1,6 @@ +--- +source: tests/integrations/check_config.rs +description: A config with the media path inside the database path fails +--- +Error: failed to validate configuration + Caused by: database and media paths overlap diff --git a/tests/integrations/snapshots/integrations__check_config__overlapping_paths_media@stdout.snap b/tests/integrations/snapshots/integrations__check_config__overlapping_paths_media@stdout.snap new file mode 100644 index 00000000..8d4c65a1 --- /dev/null +++ 
b/tests/integrations/snapshots/integrations__check_config__overlapping_paths_media@stdout.snap @@ -0,0 +1,5 @@ +--- +source: tests/integrations/check_config.rs +description: A config with the media path inside the database path fails +--- + diff --git a/tests/integrations/snapshots/integrations__check_config__unix_socket@status_code.snap b/tests/integrations/snapshots/integrations__check_config__unix_socket@status_code.snap new file mode 100644 index 00000000..2866558c --- /dev/null +++ b/tests/integrations/snapshots/integrations__check_config__unix_socket@status_code.snap @@ -0,0 +1,7 @@ +--- +source: tests/integrations/check_config.rs +description: A config listening to a Unix socket is valid +--- +Some( + 0, +) diff --git a/tests/integrations/snapshots/integrations__check_config__unix_socket@stderr.snap b/tests/integrations/snapshots/integrations__check_config__unix_socket@stderr.snap new file mode 100644 index 00000000..93e08914 --- /dev/null +++ b/tests/integrations/snapshots/integrations__check_config__unix_socket@stderr.snap @@ -0,0 +1,14 @@ +--- +source: tests/integrations/check_config.rs +description: A config listening to a Unix socket is valid +--- +[ + { + "fields": { + "message": "Configuration looks good" + }, + "level": "INFO", + "target": "grapevine::cli::check_config", + "timestamp": "[timestamp]" + } +] diff --git a/tests/integrations/snapshots/integrations__check_config__unix_socket@stdout.snap b/tests/integrations/snapshots/integrations__check_config__unix_socket@stdout.snap new file mode 100644 index 00000000..0b9549b9 --- /dev/null +++ b/tests/integrations/snapshots/integrations__check_config__unix_socket@stdout.snap @@ -0,0 +1,5 @@ +--- +source: tests/integrations/check_config.rs +description: A config listening to a Unix socket is valid +--- + diff --git a/tests/integrations/snapshots/integrations__check_config__valid_config@status_code.snap b/tests/integrations/snapshots/integrations__check_config__valid_config@status_code.snap new file 
mode 100644 index 00000000..529e7e1c --- /dev/null +++ b/tests/integrations/snapshots/integrations__check_config__valid_config@status_code.snap @@ -0,0 +1,8 @@ +--- +source: tests/integrations/check_config.rs +description: A normal config is valid +snapshot_kind: text +--- +Some( + 0, +) diff --git a/tests/integrations/snapshots/integrations__check_config__valid_config@stderr.snap b/tests/integrations/snapshots/integrations__check_config__valid_config@stderr.snap new file mode 100644 index 00000000..b5651e25 --- /dev/null +++ b/tests/integrations/snapshots/integrations__check_config__valid_config@stderr.snap @@ -0,0 +1,15 @@ +--- +source: tests/integrations/check_config.rs +description: A normal config is valid +snapshot_kind: text +--- +[ + { + "fields": { + "message": "Configuration looks good" + }, + "level": "INFO", + "target": "grapevine::cli::check_config", + "timestamp": "[timestamp]" + } +] diff --git a/tests/integrations/snapshots/integrations__check_config__valid_config@stdout.snap b/tests/integrations/snapshots/integrations__check_config__valid_config@stdout.snap new file mode 100644 index 00000000..deb44a46 --- /dev/null +++ b/tests/integrations/snapshots/integrations__check_config__valid_config@stdout.snap @@ -0,0 +1,6 @@ +--- +source: tests/integrations/check_config.rs +description: A normal config is valid +snapshot_kind: text +--- + diff --git a/tests/sytest/are-we-synapse-yet.list b/tests/sytest/are-we-synapse-yet.list deleted file mode 100644 index 99091989..00000000 --- a/tests/sytest/are-we-synapse-yet.list +++ /dev/null @@ -1,866 +0,0 @@ -reg GET /register yields a set of flows -reg POST /register can create a user -reg POST /register downcases capitals in usernames -reg POST /register returns the same device_id as that in the request -reg POST /register rejects registration of usernames with '!' 
-reg POST /register rejects registration of usernames with '"' -reg POST /register rejects registration of usernames with ':' -reg POST /register rejects registration of usernames with '?' -reg POST /register rejects registration of usernames with '\' -reg POST /register rejects registration of usernames with '@' -reg POST /register rejects registration of usernames with '[' -reg POST /register rejects registration of usernames with ']' -reg POST /register rejects registration of usernames with '{' -reg POST /register rejects registration of usernames with '|' -reg POST /register rejects registration of usernames with '}' -reg POST /register rejects registration of usernames with '£' -reg POST /register rejects registration of usernames with 'é' -reg POST /register rejects registration of usernames with '\n' -reg POST /register rejects registration of usernames with ''' -reg POST /r0/admin/register with shared secret -reg POST /r0/admin/register admin with shared secret -reg POST /r0/admin/register with shared secret downcases capitals -reg POST /r0/admin/register with shared secret disallows symbols -reg POST rejects invalid utf-8 in JSON -log GET /login yields a set of flows -log POST /login can log in as a user -log POST /login returns the same device_id as that in the request -log POST /login can log in as a user with just the local part of the id -log POST /login as non-existing user is rejected -log POST /login wrong password is rejected -log Interactive authentication types include SSO -log Can perform interactive authentication with SSO -log The user must be consistent through an interactive authentication session with SSO -log The operation must be consistent through an interactive authentication session -v1s GET /events initially -v1s GET /initialSync initially -csa Version responds 200 OK with valid structure -pro PUT /profile/:user_id/displayname sets my name -pro GET /profile/:user_id/displayname publicly accessible -pro PUT 
/profile/:user_id/avatar_url sets my avatar -pro GET /profile/:user_id/avatar_url publicly accessible -dev GET /device/{deviceId} -dev GET /device/{deviceId} gives a 404 for unknown devices -dev GET /devices -dev PUT /device/{deviceId} updates device fields -dev PUT /device/{deviceId} gives a 404 for unknown devices -dev DELETE /device/{deviceId} -dev DELETE /device/{deviceId} requires UI auth user to match device owner -dev DELETE /device/{deviceId} with no body gives a 401 -dev The deleted device must be consistent through an interactive auth session -dev Users receive device_list updates for their own devices -pre GET /presence/:user_id/status fetches initial status -pre PUT /presence/:user_id/status updates my presence -crm POST /createRoom makes a public room -crm POST /createRoom makes a private room -crm POST /createRoom makes a private room with invites -crm POST /createRoom makes a room with a name -crm POST /createRoom makes a room with a topic -syn Can /sync newly created room -crm POST /createRoom creates a room with the given version -crm POST /createRoom rejects attempts to create rooms with numeric versions -crm POST /createRoom rejects attempts to create rooms with unknown versions -crm POST /createRoom ignores attempts to set the room version via creation_content -mem GET /rooms/:room_id/state/m.room.member/:user_id fetches my membership -mem GET /rooms/:room_id/state/m.room.member/:user_id?format=event fetches my membership event -rst GET /rooms/:room_id/state/m.room.power_levels fetches powerlevels -mem GET /rooms/:room_id/joined_members fetches my membership -v1s GET /rooms/:room_id/initialSync fetches initial sync state -pub GET /publicRooms lists newly-created room -ali GET /directory/room/:room_alias yields room ID -mem GET /joined_rooms lists newly-created room -rst POST /rooms/:room_id/state/m.room.name sets name -rst GET /rooms/:room_id/state/m.room.name gets name -rst POST /rooms/:room_id/state/m.room.topic sets topic -rst GET 
/rooms/:room_id/state/m.room.topic gets topic -rst GET /rooms/:room_id/state fetches entire room state -crm POST /createRoom with creation content -ali PUT /directory/room/:room_alias creates alias -nsp GET /rooms/:room_id/aliases lists aliases -jon POST /rooms/:room_id/join can join a room -jon POST /join/:room_alias can join a room -jon POST /join/:room_id can join a room -jon POST /join/:room_id can join a room with custom content -jon POST /join/:room_alias can join a room with custom content -lev POST /rooms/:room_id/leave can leave a room -inv POST /rooms/:room_id/invite can send an invite -ban POST /rooms/:room_id/ban can ban a user -snd POST /rooms/:room_id/send/:event_type sends a message -snd PUT /rooms/:room_id/send/:event_type/:txn_id sends a message -snd PUT /rooms/:room_id/send/:event_type/:txn_id deduplicates the same txn id -get GET /rooms/:room_id/messages returns a message -get GET /rooms/:room_id/messages lazy loads members correctly -typ PUT /rooms/:room_id/typing/:user_id sets typing notification -typ Typing notifications don't leak (3 subtests) -rst GET /rooms/:room_id/state/m.room.power_levels can fetch levels -rst PUT /rooms/:room_id/state/m.room.power_levels can set levels -rst PUT power_levels should not explode if the old power levels were empty -rst Both GET and PUT work -rct POST /rooms/:room_id/receipt can create receipts -red POST /rooms/:room_id/read_markers can create read marker -med POST /media/r0/upload can create an upload -med GET /media/r0/download can fetch the value again -cap GET /capabilities is present and well formed for registered user -cap GET /r0/capabilities is not public -reg Register with a recaptcha -reg registration is idempotent, without username specified -reg registration is idempotent, with username specified -reg registration remembers parameters -reg registration accepts non-ascii passwords -reg registration with inhibit_login inhibits login -reg User signups are forbidden from starting with '_' -reg Can 
register using an email address -log Can login with 3pid and password using m.login.password -log login types include SSO -log /login/cas/redirect redirects if the old m.login.cas login type is listed -log Can login with new user via CAS -lox Can logout current device -lox Can logout all devices -lox Request to logout with invalid an access token is rejected -lox Request to logout without an access token is rejected -log After changing password, can't log in with old password -log After changing password, can log in with new password -log After changing password, existing session still works -log After changing password, a different session no longer works by default -log After changing password, different sessions can optionally be kept -psh Pushers created with a different access token are deleted on password change -psh Pushers created with a the same access token are not deleted on password change -acc Can deactivate account -acc Can't deactivate account with wrong password -acc After deactivating account, can't log in with password -acc After deactivating account, can't log in with an email -v1s initialSync sees my presence status -pre Presence change reports an event to myself -pre Friends presence changes reports events -crm Room creation reports m.room.create to myself -crm Room creation reports m.room.member to myself -rst Setting room topic reports m.room.topic to myself -v1s Global initialSync -v1s Global initialSync with limit=0 gives no messages -v1s Room initialSync -v1s Room initialSync with limit=0 gives no messages -rst Setting state twice is idempotent -jon Joining room twice is idempotent -syn New room members see their own join event -v1s New room members see existing users' presence in room initialSync -syn Existing members see new members' join events -syn Existing members see new members' presence -v1s All room members see all room members' presence in global initialSync -f,jon Remote users can join room by alias -syn New room members see 
their own join event -v1s New room members see existing members' presence in room initialSync -syn Existing members see new members' join events -syn Existing members see new member's presence -v1s New room members see first user's profile information in global initialSync -v1s New room members see first user's profile information in per-room initialSync -f,jon Remote users may not join unfederated rooms -syn Local room members see posted message events -v1s Fetching eventstream a second time doesn't yield the message again -syn Local non-members don't see posted message events -get Local room members can get room messages -f,syn Remote room members also see posted message events -f,get Remote room members can get room messages -get Message history can be paginated -f,get Message history can be paginated over federation -eph Ephemeral messages received from clients are correctly expired -ali Room aliases can contain Unicode -f,ali Remote room alias queries can handle Unicode -ali Canonical alias can be set -ali Canonical alias can include alt_aliases -ali Regular users can add and delete aliases in the default room configuration -ali Regular users can add and delete aliases when m.room.aliases is restricted -ali Deleting a non-existent alias should return a 404 -ali Users can't delete other's aliases -ali Users with sufficient power-level can delete other's aliases -ali Can delete canonical alias -ali Alias creators can delete alias with no ops -ali Alias creators can delete canonical alias with no ops -ali Only room members can list aliases of a room -inv Can invite users to invite-only rooms -inv Uninvited users cannot join the room -inv Invited user can reject invite -f,inv Invited user can reject invite over federation -f,inv Invited user can reject invite over federation several times -inv Invited user can reject invite for empty room -f,inv Invited user can reject invite over federation for empty room -inv Invited user can reject local invite after originator 
leaves -inv Invited user can see room metadata -f,inv Remote invited user can see room metadata -inv Users cannot invite themselves to a room -inv Users cannot invite a user that is already in the room -ban Banned user is kicked and may not rejoin until unbanned -f,ban Remote banned user is kicked and may not rejoin until unbanned -ban 'ban' event respects room powerlevel -plv setting 'm.room.name' respects room powerlevel -plv setting 'm.room.power_levels' respects room powerlevel (2 subtests) -plv Unprivileged users can set m.room.topic if it only needs level 0 -plv Users cannot set ban powerlevel higher than their own (2 subtests) -plv Users cannot set kick powerlevel higher than their own (2 subtests) -plv Users cannot set redact powerlevel higher than their own (2 subtests) -v1s Check that event streams started after a client joined a room work (SYT-1) -v1s Event stream catches up fully after many messages -xxx POST /rooms/:room_id/redact/:event_id as power user redacts message -xxx POST /rooms/:room_id/redact/:event_id as original message sender redacts message -xxx POST /rooms/:room_id/redact/:event_id as random user does not redact message -xxx POST /redact disallows redaction of event in different room -xxx Redaction of a redaction redacts the redaction reason -v1s A departed room is still included in /initialSync (SPEC-216) -v1s Can get rooms/{roomId}/initialSync for a departed room (SPEC-216) -rst Can get rooms/{roomId}/state for a departed room (SPEC-216) -mem Can get rooms/{roomId}/members for a departed room (SPEC-216) -get Can get rooms/{roomId}/messages for a departed room (SPEC-216) -rst Can get 'm.room.name' state for a departed room (SPEC-216) -syn Getting messages going forward is limited for a departed room (SPEC-216) -3pd Can invite existing 3pid -3pd Can invite existing 3pid with no ops into a private room -3pd Can invite existing 3pid in createRoom -3pd Can invite unbound 3pid -f,3pd Can invite unbound 3pid over federation -3pd Can invite 
unbound 3pid with no ops into a private room -f,3pd Can invite unbound 3pid over federation with no ops into a private room -f,3pd Can invite unbound 3pid over federation with users from both servers -3pd Can accept unbound 3pid invite after inviter leaves -3pd Can accept third party invite with /join -3pd 3pid invite join with wrong but valid signature are rejected -3pd 3pid invite join valid signature but revoked keys are rejected -3pd 3pid invite join valid signature but unreachable ID server are rejected -gst Guest user cannot call /events globally -gst Guest users can join guest_access rooms -gst Guest users can send messages to guest_access rooms if joined -gst Guest user calling /events doesn't tightloop -gst Guest users are kicked from guest_access rooms on revocation of guest_access -gst Guest user can set display names -gst Guest users are kicked from guest_access rooms on revocation of guest_access over federation -gst Guest user can upgrade to fully featured user -gst Guest user cannot upgrade other users -pub GET /publicRooms lists rooms -pub GET /publicRooms includes avatar URLs -gst Guest users can accept invites to private rooms over federation -gst Guest users denied access over federation if guest access prohibited -mem Room members can override their displayname on a room-specific basis -mem Room members can join a room with an overridden displayname -mem Users cannot kick users from a room they are not in -mem Users cannot kick users who have already left a room -typ Typing notification sent to local room members -f,typ Typing notifications also sent to remote room members -typ Typing can be explicitly stopped -rct Read receipts are visible to /initialSync -rct Read receipts are sent as events -rct Receipts must be m.read -pro displayname updates affect room member events -pro avatar_url updates affect room member events -gst m.room.history_visibility == "world_readable" allows/forbids appropriately for Guest users -gst m.room.history_visibility 
== "shared" allows/forbids appropriately for Guest users -gst m.room.history_visibility == "invited" allows/forbids appropriately for Guest users -gst m.room.history_visibility == "joined" allows/forbids appropriately for Guest users -gst m.room.history_visibility == "default" allows/forbids appropriately for Guest users -gst Guest non-joined user cannot call /events on shared room -gst Guest non-joined user cannot call /events on invited room -gst Guest non-joined user cannot call /events on joined room -gst Guest non-joined user cannot call /events on default room -gst Guest non-joined user can call /events on world_readable room -gst Guest non-joined users can get state for world_readable rooms -gst Guest non-joined users can get individual state for world_readable rooms -gst Guest non-joined users cannot room initalSync for non-world_readable rooms -gst Guest non-joined users can room initialSync for world_readable rooms -gst Guest non-joined users can get individual state for world_readable rooms after leaving -gst Guest non-joined users cannot send messages to guest_access rooms if not joined -gst Guest users can sync from world_readable guest_access rooms if joined -gst Guest users can sync from shared guest_access rooms if joined -gst Guest users can sync from invited guest_access rooms if joined -gst Guest users can sync from joined guest_access rooms if joined -gst Guest users can sync from default guest_access rooms if joined -ath m.room.history_visibility == "world_readable" allows/forbids appropriately for Real users -ath m.room.history_visibility == "shared" allows/forbids appropriately for Real users -ath m.room.history_visibility == "invited" allows/forbids appropriately for Real users -ath m.room.history_visibility == "joined" allows/forbids appropriately for Real users -ath m.room.history_visibility == "default" allows/forbids appropriately for Real users -ath Real non-joined user cannot call /events on shared room -ath Real non-joined user cannot 
call /events on invited room -ath Real non-joined user cannot call /events on joined room -ath Real non-joined user cannot call /events on default room -ath Real non-joined user can call /events on world_readable room -ath Real non-joined users can get state for world_readable rooms -ath Real non-joined users can get individual state for world_readable rooms -ath Real non-joined users cannot room initalSync for non-world_readable rooms -ath Real non-joined users can room initialSync for world_readable rooms -ath Real non-joined users can get individual state for world_readable rooms after leaving -ath Real non-joined users cannot send messages to guest_access rooms if not joined -ath Real users can sync from world_readable guest_access rooms if joined -ath Real users can sync from shared guest_access rooms if joined -ath Real users can sync from invited guest_access rooms if joined -ath Real users can sync from joined guest_access rooms if joined -ath Real users can sync from default guest_access rooms if joined -ath Only see history_visibility changes on boundaries -f,ath Backfill works correctly with history visibility set to joined -fgt Forgotten room messages cannot be paginated -fgt Forgetting room does not show up in v2 /sync -fgt Can forget room you've been kicked from -fgt Can't forget room you're still in -fgt Can re-join room if re-invited -ath Only original members of the room can see messages from erased users -mem /joined_rooms returns only joined rooms -mem /joined_members return joined members -ctx /context/ on joined room works -ctx /context/ on non world readable room does not work -ctx /context/ returns correct number of events -ctx /context/ with lazy_load_members filter works -get /event/ on joined room works -get /event/ on non world readable room does not work -get /event/ does not allow access to events before the user joined -mem Can get rooms/{roomId}/members -mem Can get rooms/{roomId}/members at a given point -mem Can filter 
rooms/{roomId}/members -upg /upgrade creates a new room -upg /upgrade should preserve room visibility for public rooms -upg /upgrade should preserve room visibility for private rooms -upg /upgrade copies >100 power levels to the new room -upg /upgrade copies the power levels to the new room -upg /upgrade preserves the power level of the upgrading user in old and new rooms -upg /upgrade copies important state to the new room -upg /upgrade copies ban events to the new room -upg local user has push rules copied to upgraded room -f,upg remote user has push rules copied to upgraded room -upg /upgrade moves aliases to the new room -upg /upgrade moves remote aliases to the new room -upg /upgrade preserves direct room state -upg /upgrade preserves room federation ability -upg /upgrade restricts power levels in the old room -upg /upgrade restricts power levels in the old room when the old PLs are unusual -upg /upgrade to an unknown version is rejected -upg /upgrade is rejected if the user can't send state events -upg /upgrade of a bogus room fails gracefully -upg Cannot send tombstone event that points to the same room -f,upg Local and remote users' homeservers remove a room from their public directory on upgrade -rst Name/topic keys are correct -f,pub Can get remote public room list -pub Can paginate public room list -pub Can search public room list -syn Can create filter -syn Can download filter -syn Can sync -syn Can sync a joined room -syn Full state sync includes joined rooms -syn Newly joined room is included in an incremental sync -syn Newly joined room has correct timeline in incremental sync -syn Newly joined room includes presence in incremental sync -syn Get presence for newly joined members in incremental sync -syn Can sync a room with a single message -syn Can sync a room with a message with a transaction id -syn A message sent after an initial sync appears in the timeline of an incremental sync. 
-syn A filtered timeline reaches its limit -syn Syncing a new room with a large timeline limit isn't limited -syn A full_state incremental update returns only recent timeline -syn A prev_batch token can be used in the v1 messages API -syn A next_batch token can be used in the v1 messages API -syn User sees their own presence in a sync -syn User is offline if they set_presence=offline in their sync -syn User sees updates to presence from other users in the incremental sync. -syn State is included in the timeline in the initial sync -f,syn State from remote users is included in the state in the initial sync -syn Changes to state are included in an incremental sync -syn Changes to state are included in an gapped incremental sync -f,syn State from remote users is included in the timeline in an incremental sync -syn A full_state incremental update returns all state -syn When user joins a room the state is included in the next sync -syn A change to displayname should not result in a full state sync -syn A change to displayname should appear in incremental /sync -syn When user joins a room the state is included in a gapped sync -syn When user joins and leaves a room in the same batch, the full state is still included in the next sync -syn Current state appears in timeline in private history -syn Current state appears in timeline in private history with many messages before -syn Current state appears in timeline in private history with many messages after -syn Rooms a user is invited to appear in an initial sync -syn Rooms a user is invited to appear in an incremental sync -syn Newly joined room is included in an incremental sync after invite -syn Sync can be polled for updates -syn Sync is woken up for leaves -syn Left rooms appear in the leave section of sync -syn Newly left rooms appear in the leave section of incremental sync -syn We should see our own leave event, even if history_visibility is restricted (SYN-662) -syn We should see our own leave event when rejecting 
an invite, even if history_visibility is restricted (riot-web/3462) -syn Newly left rooms appear in the leave section of gapped sync -syn Previously left rooms don't appear in the leave section of sync -syn Left rooms appear in the leave section of full state sync -syn Archived rooms only contain history from before the user left -syn Banned rooms appear in the leave section of sync -syn Newly banned rooms appear in the leave section of incremental sync -syn Newly banned rooms appear in the leave section of incremental sync -syn Typing events appear in initial sync -syn Typing events appear in incremental sync -syn Typing events appear in gapped sync -syn Read receipts appear in initial v2 /sync -syn New read receipts appear in incremental v2 /sync -syn Can pass a JSON filter as a query parameter -syn Can request federation format via the filter -syn Read markers appear in incremental v2 /sync -syn Read markers appear in initial v2 /sync -syn Read markers can be updated -syn Lazy loading parameters in the filter are strictly boolean -syn The only membership state included in an initial sync is for all the senders in the timeline -syn The only membership state included in an incremental sync is for senders in the timeline -syn The only membership state included in a gapped incremental sync is for senders in the timeline -syn Gapped incremental syncs include all state changes -syn Old leaves are present in gapped incremental syncs -syn Leaves are present in non-gapped incremental syncs -syn Old members are included in gappy incr LL sync if they start speaking -syn Members from the gap are included in gappy incr LL sync -syn We don't send redundant membership state across incremental syncs by default -syn We do send redundant membership state across incremental syncs if asked -syn Unnamed room comes with a name summary -syn Named room comes with just joined member count summary -syn Room summary only has 5 heroes -syn Room summary counts change when membership changes 
-rmv User can create and send/receive messages in a room with version 1 -rmv User can create and send/receive messages in a room with version 1 (2 subtests) -rmv local user can join room with version 1 -rmv User can invite local user to room with version 1 -rmv remote user can join room with version 1 -rmv User can invite remote user to room with version 1 -rmv Remote user can backfill in a room with version 1 -rmv Can reject invites over federation for rooms with version 1 -rmv Can receive redactions from regular users over federation in room version 1 -rmv User can create and send/receive messages in a room with version 2 -rmv User can create and send/receive messages in a room with version 2 (2 subtests) -rmv local user can join room with version 2 -rmv User can invite local user to room with version 2 -rmv remote user can join room with version 2 -rmv User can invite remote user to room with version 2 -rmv Remote user can backfill in a room with version 2 -rmv Can reject invites over federation for rooms with version 2 -rmv Can receive redactions from regular users over federation in room version 2 -rmv User can create and send/receive messages in a room with version 3 -rmv User can create and send/receive messages in a room with version 3 (2 subtests) -rmv local user can join room with version 3 -rmv User can invite local user to room with version 3 -rmv remote user can join room with version 3 -rmv User can invite remote user to room with version 3 -rmv Remote user can backfill in a room with version 3 -rmv Can reject invites over federation for rooms with version 3 -rmv Can receive redactions from regular users over federation in room version 3 -rmv User can create and send/receive messages in a room with version 4 -rmv User can create and send/receive messages in a room with version 4 (2 subtests) -rmv local user can join room with version 4 -rmv User can invite local user to room with version 4 -rmv remote user can join room with version 4 -rmv User can 
invite remote user to room with version 4 -rmv Remote user can backfill in a room with version 4 -rmv Can reject invites over federation for rooms with version 4 -rmv Can receive redactions from regular users over federation in room version 4 -rmv User can create and send/receive messages in a room with version 5 -rmv User can create and send/receive messages in a room with version 5 (2 subtests) -rmv local user can join room with version 5 -rmv User can invite local user to room with version 5 -rmv remote user can join room with version 5 -rmv User can invite remote user to room with version 5 -rmv Remote user can backfill in a room with version 5 -rmv Can reject invites over federation for rooms with version 5 -rmv Can receive redactions from regular users over federation in room version 5 -rmv User can create and send/receive messages in a room with version 6 -rmv User can create and send/receive messages in a room with version 6 (2 subtests) -rmv local user can join room with version 6 -rmv User can invite local user to room with version 6 -rmv remote user can join room with version 6 -rmv User can invite remote user to room with version 6 -rmv Remote user can backfill in a room with version 6 -rmv Can reject invites over federation for rooms with version 6 -rmv Can receive redactions from regular users over federation in room version 6 -rmv Inbound federation rejects invites which include invalid JSON for room version 6 -rmv Outbound federation rejects invite response which include invalid JSON for room version 6 -rmv Inbound federation rejects invite rejections which include invalid JSON for room version 6 -rmv Server rejects invalid JSON in a version 6 room -pre Presence changes are reported to local room members -f,pre Presence changes are also reported to remote room members -pre Presence changes to UNAVAILABLE are reported to local room members -f,pre Presence changes to UNAVAILABLE are reported to remote room members -v1s Newly created users see their 
own presence in /initialSync (SYT-34) -dvk Can upload device keys -dvk Should reject keys claiming to belong to a different user -dvk Can query device keys using POST -dvk Can query specific device keys using POST -dvk query for user with no keys returns empty key dict -dvk Can claim one time key using POST -f,dvk Can query remote device keys using POST -f,dvk Can claim remote one time key using POST -dvk Local device key changes appear in v2 /sync -dvk Local new device changes appear in v2 /sync -dvk Local delete device changes appear in v2 /sync -dvk Local update device changes appear in v2 /sync -dvk Can query remote device keys using POST after notification -f,dev Device deletion propagates over federation -f,dev If remote user leaves room, changes device and rejoins we see update in sync -f,dev If remote user leaves room we no longer receive device updates -dvk Local device key changes appear in /keys/changes -dvk New users appear in /keys/changes -f,dvk If remote user leaves room, changes device and rejoins we see update in /keys/changes -dvk Get left notifs in sync and /keys/changes when other user leaves -dvk Get left notifs for other users in sync and /keys/changes when user leaves -f,dvk If user leaves room, remote user changes device and rejoins we see update in /sync and /keys/changes -dkb Can create backup version -dkb Can update backup version -dkb Responds correctly when backup is empty -dkb Can backup keys -dkb Can update keys with better versions -dkb Will not update keys with worse versions -dkb Will not back up to an old backup version -dkb Can delete backup -dkb Deleted & recreated backups are empty -dkb Can create more than 10 backup versions -xsk Can upload self-signing keys -xsk Fails to upload self-signing keys with no auth -xsk Fails to upload self-signing key without master key -xsk Changing master key notifies local users -xsk Changing user-signing key notifies local users -f,xsk can fetch self-signing keys over federation -f,xsk 
uploading self-signing key notifies over federation -f,xsk uploading signed devices gets propagated over federation -tag Can add tag -tag Can remove tag -tag Can list tags for a room -v1s Tags appear in the v1 /events stream -v1s Tags appear in the v1 /initalSync -v1s Tags appear in the v1 room initial sync -tag Tags appear in an initial v2 /sync -tag Newly updated tags appear in an incremental v2 /sync -tag Deleted tags appear in an incremental v2 /sync -tag local user has tags copied to the new room -f,tag remote user has tags copied to the new room -sch Can search for an event by body -sch Can get context around search results -sch Can back-paginate search results -sch Search works across an upgraded room and its predecessor -sch Search results with rank ordering do not include redacted events -sch Search results with recent ordering do not include redacted events -acc Can add account data -acc Can add account data to room -acc Can get account data without syncing -acc Can get room account data without syncing -v1s Latest account data comes down in /initialSync -v1s Latest account data comes down in room initialSync -v1s Account data appears in v1 /events stream -v1s Room account data appears in v1 /events stream -acc Latest account data appears in v2 /sync -acc New account data appears in incremental v2 /sync -oid Can generate a openid access_token that can be exchanged for information about a user -oid Invalid openid access tokens are rejected -oid Requests to userinfo without access tokens are rejected -std Can send a message directly to a device using PUT /sendToDevice -std Can recv a device message using /sync -std Can recv device messages until they are acknowledged -std Device messages with the same txn_id are deduplicated -std Device messages wake up /sync -std Can recv device messages over federation -fsd Device messages over federation wake up /sync -std Can send messages with a wildcard device id -std Can send messages with a wildcard device id to two 
devices -std Wildcard device messages wake up /sync -fsd Wildcard device messages over federation wake up /sync -adm /whois -nsp /purge_history -nsp /purge_history by ts -nsp Can backfill purged history -nsp Shutdown room -ign Ignore user in existing room -ign Ignore invite in full sync -ign Ignore invite in incremental sync -fky Checking local federation server -fky Federation key API allows unsigned requests for keys -fky Federation key API can act as a notary server via a GET request -fky Federation key API can act as a notary server via a POST request -fky Key notary server should return an expired key if it can't find any others -fky Key notary server must not overwrite a valid key with a spurious result from the origin server -fqu Non-numeric ports in server names are rejected -fqu Outbound federation can query profile data -fqu Inbound federation can query profile data -fqu Outbound federation can query room alias directory -fqu Inbound federation can query room alias directory -fsj Outbound federation can query v1 /send_join -fsj Outbound federation can query v2 /send_join -fmj Outbound federation passes make_join failures through to the client -fsj Inbound federation can receive v1 /send_join -fsj Inbound federation can receive v2 /send_join -fmj Inbound /v1/make_join rejects remote attempts to join local users to rooms -fsj Inbound /v1/send_join rejects incorrectly-signed joins -fsj Inbound /v1/send_join rejects joins from other servers -fau Inbound federation rejects remote attempts to kick local users to rooms -frv Inbound federation rejects attempts to join v1 rooms from servers without v1 support -frv Inbound federation rejects attempts to join v2 rooms from servers lacking version support -frv Inbound federation rejects attempts to join v2 rooms from servers only supporting v1 -frv Inbound federation accepts attempts to join v2 rooms from servers with support -frv Outbound federation correctly handles unsupported room versions -frv A pair of servers 
can establish a join in a v2 room -fsj Outbound federation rejects send_join responses with no m.room.create event -frv Outbound federation rejects m.room.create events with an unknown room version -fsj Event with an invalid signature in the send_join response should not cause room join to fail -fsj Inbound: send_join rejects invalid JSON for room version 6 -fed Outbound federation can send events -fed Inbound federation can receive events -fed Inbound federation can receive redacted events -fed Ephemeral messages received from servers are correctly expired -fed Events whose auth_events are in the wrong room do not mess up the room state -fed Inbound federation can return events -fed Inbound federation redacts events from erased users -fme Outbound federation can request missing events -fme Inbound federation can return missing events for world_readable visibility -fme Inbound federation can return missing events for shared visibility -fme Inbound federation can return missing events for invite visibility -fme Inbound federation can return missing events for joined visibility -fme outliers whose auth_events are in a different room are correctly rejected -fbk Outbound federation can backfill events -fbk Inbound federation can backfill events -fbk Backfill checks the events requested belong to the room -fbk Backfilled events whose prev_events are in a different room do not allow cross-room back-pagination -fiv Outbound federation can send invites via v1 API -fiv Outbound federation can send invites via v2 API -fiv Inbound federation can receive invites via v1 API -fiv Inbound federation can receive invites via v2 API -fiv Inbound federation can receive invite and reject when remote replies with a 403 -fiv Inbound federation can receive invite and reject when remote replies with a 500 -fiv Inbound federation can receive invite and reject when remote is unreachable -fiv Inbound federation rejects invites which are not signed by the sender -fiv Inbound federation can 
receive invite rejections -fiv Inbound federation rejects incorrectly-signed invite rejections -fsl Inbound /v1/send_leave rejects leaves from other servers -fst Inbound federation can get state for a room -fst Inbound federation of state requires event_id as a mandatory paramater -fst Inbound federation can get state_ids for a room -fst Inbound federation of state_ids requires event_id as a mandatory paramater -fst Federation rejects inbound events where the prev_events cannot be found -fst Room state at a rejected message event is the same as its predecessor -fst Room state at a rejected state event is the same as its predecessor -fst Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state -fst Federation handles empty auth_events in state_ids sanely -fst Getting state checks the events requested belong to the room -fst Getting state IDs checks the events requested belong to the room -fst Should not be able to take over the room by pretending there is no PL event -fpb Inbound federation can get public room list -fed Outbound federation sends receipts -fed Inbound federation rejects receipts from wrong remote -fed Inbound federation ignores redactions from invalid servers room > v3 -fed An event which redacts an event in a different room should be ignored -fed An event which redacts itself should be ignored -fed A pair of events which redact each other should be ignored -fdk Local device key changes get to remote servers -fdk Server correctly handles incoming m.device_list_update -fdk Server correctly resyncs when client query keys and there is no remote cache -fdk Server correctly resyncs when server leaves and rejoins a room -fdk Local device key changes get to remote servers with correct prev_id -fdk Device list doesn't change if remote server is down -fdk If a device list update goes missing, the server resyncs on the next one -fst Name/topic keys are correct -fau Remote servers cannot set power levels in rooms 
without existing powerlevels -fau Remote servers should reject attempts by non-creators to set the power levels -fau Inbound federation rejects typing notifications from wrong remote -fau Users cannot set notifications powerlevel higher than their own -fed Forward extremities remain so even after the next events are populated as outliers -fau Banned servers cannot send events -fau Banned servers cannot /make_join -fau Banned servers cannot /send_join -fau Banned servers cannot /make_leave -fau Banned servers cannot /send_leave -fau Banned servers cannot /invite -fau Banned servers cannot get room state -fau Banned servers cannot get room state ids -fau Banned servers cannot backfill -fau Banned servers cannot /event_auth -fau Banned servers cannot get missing events -fau Server correctly handles transactions that break edu limits -fau Inbound federation correctly soft fails events -fau Inbound federation accepts a second soft-failed event -fau Inbound federation correctly handles soft failed events as extremities -med Can upload with Unicode file name -med Can download with Unicode file name locally -f,med Can download with Unicode file name over federation -med Alternative server names do not cause a routing loop -med Can download specifying a different Unicode file name -med Can upload without a file name -med Can download without a file name locally -f,med Can download without a file name over federation -med Can upload with ASCII file name -med Can download file 'ascii' -med Can download file 'name with spaces' -med Can download file 'name;with;semicolons' -med Can download specifying a different ASCII file name -med Can send image in room message -med Can fetch images in room -med POSTed media can be thumbnailed -f,med Remote media can be thumbnailed -med Test URL preview -med Can read configuration endpoint -nsp Can quarantine media in rooms -udr User appears in user directory -udr User in private room doesn't appear in user directory -udr User joining then 
leaving public room appears and dissappears from directory -udr Users appear/disappear from directory when join_rules are changed -udr Users appear/disappear from directory when history_visibility are changed -udr Users stay in directory when join_rules are changed but history_visibility is world_readable -f,udr User in remote room doesn't appear in user directory after server left room -udr User directory correctly update on display name change -udr User in shared private room does appear in user directory -udr User in shared private room does appear in user directory until leave -udr User in dir while user still shares private rooms -nsp Create group -nsp Add group rooms -nsp Remove group rooms -nsp Get local group profile -nsp Get local group users -nsp Add/remove local group rooms -nsp Get local group summary -nsp Get remote group profile -nsp Get remote group users -nsp Add/remove remote group rooms -nsp Get remote group summary -nsp Add local group users -nsp Remove self from local group -nsp Remove other from local group -nsp Add remote group users -nsp Remove self from remote group -nsp Listing invited users of a remote group when not a member returns a 403 -nsp Add group category -nsp Remove group category -nsp Get group categories -nsp Add group role -nsp Remove group role -nsp Get group roles -nsp Add room to group summary -nsp Adding room to group summary keeps room_id when fetching rooms in group -nsp Adding multiple rooms to group summary have correct order -nsp Remove room from group summary -nsp Add room to group summary with category -nsp Remove room from group summary with category -nsp Add user to group summary -nsp Adding multiple users to group summary have correct order -nsp Remove user from group summary -nsp Add user to group summary with role -nsp Remove user from group summary with role -nsp Local group invites come down sync -nsp Group creator sees group in sync -nsp Group creator sees group in initial sync -nsp Get/set local group 
publicity -nsp Bulk get group publicity -nsp Joinability comes down summary -nsp Set group joinable and join it -nsp Group is not joinable by default -nsp Group is joinable over federation -nsp Room is transitioned on local and remote groups upon room upgrade -3pd Can bind 3PID via home server -3pd Can bind and unbind 3PID via homeserver -3pd Can unbind 3PID via homeserver when bound out of band -3pd 3PIDs are unbound after account deactivation -3pd Can bind and unbind 3PID via /unbind by specifying the identity server -3pd Can bind and unbind 3PID via /unbind without specifying the identity server -app AS can create a user -app AS can create a user with an underscore -app AS can create a user with inhibit_login -app AS cannot create users outside its own namespace -app Regular users cannot register within the AS namespace -app AS can make room aliases -app Regular users cannot create room aliases within the AS namespace -app AS-ghosted users can use rooms via AS -app AS-ghosted users can use rooms themselves -app Ghost user must register before joining room -app AS can set avatar for ghosted users -app AS can set displayname for ghosted users -app AS can't set displayname for random users -app Inviting an AS-hosted user asks the AS server -app Accesing an AS-hosted room alias asks the AS server -app Events in rooms with AS-hosted room aliases are sent to AS server -app AS user (not ghost) can join room without registering -app AS user (not ghost) can join room without registering, with user_id query param -app HS provides query metadata -app HS can provide query metadata on a single protocol -app HS will proxy request for 3PU mapping -app HS will proxy request for 3PL mapping -app AS can publish rooms in their own list -app AS and main public room lists are separate -app AS can deactivate a user -psh Test that a message is pushed -psh Invites are pushed -psh Rooms with names are correctly named in pushed -psh Rooms with canonical alias are correctly named in 
pushed -psh Rooms with many users are correctly pushed -psh Don't get pushed for rooms you've muted -psh Rejected events are not pushed -psh Can add global push rule for room -psh Can add global push rule for sender -psh Can add global push rule for content -psh Can add global push rule for override -psh Can add global push rule for underride -psh Can add global push rule for content -psh New rules appear before old rules by default -psh Can add global push rule before an existing rule -psh Can add global push rule after an existing rule -psh Can delete a push rule -psh Can disable a push rule -psh Adding the same push rule twice is idempotent -psh Messages that notify from another user increment unread notification count -psh Messages that highlight from another user increment unread highlight count -psh Can change the actions of default rules -psh Changing the actions of an unknown default rule fails with 404 -psh Can change the actions of a user specified rule -psh Changing the actions of an unknown rule fails with 404 -psh Can fetch a user's pushers -psh Push rules come down in an initial /sync -psh Adding a push rule wakes up an incremental /sync -psh Disabling a push rule wakes up an incremental /sync -psh Enabling a push rule wakes up an incremental /sync -psh Setting actions for a push rule wakes up an incremental /sync -psh Can enable/disable default rules -psh Enabling an unknown default rule fails with 404 -psh Test that rejected pushers are removed. 
-psh Notifications can be viewed with GET /notifications -psh Trying to add push rule with no scope fails with 400 -psh Trying to add push rule with invalid scope fails with 400 -psh Trying to add push rule with missing template fails with 400 -psh Trying to add push rule with missing rule_id fails with 400 -psh Trying to add push rule with empty rule_id fails with 400 -psh Trying to add push rule with invalid template fails with 400 -psh Trying to add push rule with rule_id with slashes fails with 400 -psh Trying to add push rule with override rule without conditions fails with 400 -psh Trying to add push rule with underride rule without conditions fails with 400 -psh Trying to add push rule with condition without kind fails with 400 -psh Trying to add push rule with content rule without pattern fails with 400 -psh Trying to add push rule with no actions fails with 400 -psh Trying to add push rule with invalid action fails with 400 -psh Trying to add push rule with invalid attr fails with 400 -psh Trying to add push rule with invalid value for enabled fails with 400 -psh Trying to get push rules with no trailing slash fails with 400 -psh Trying to get push rules with scope without trailing slash fails with 400 -psh Trying to get push rules with template without tailing slash fails with 400 -psh Trying to get push rules with unknown scope fails with 400 -psh Trying to get push rules with unknown template fails with 400 -psh Trying to get push rules with unknown attribute fails with 400 -psh Trying to get push rules with unknown rule_id fails with 404 -psh Rooms with names are correctly named in pushes -v1s GET /initialSync with non-numeric 'limit' -v1s GET /events with non-numeric 'limit' -v1s GET /events with negative 'limit' -v1s GET /events with non-numeric 'timeout' -ath Event size limits -syn Check creating invalid filters returns 4xx -f,pre New federated private chats get full presence information (SYN-115) -pre Left room members do not cause problems for 
presence -crm Rooms can be created with an initial invite list (SYN-205) (1 subtests) -typ Typing notifications don't leak -ban Non-present room members cannot ban others -psh Getting push rules doesn't corrupt the cache SYN-390 -inv Test that we can be reinvited to a room we created -syn Multiple calls to /sync should not cause 500 errors -gst Guest user can call /events on another world_readable room (SYN-606) -gst Real user can call /events on another world_readable room (SYN-606) -gst Events come down the correct room -pub Asking for a remote rooms list, but supplying the local server's name, returns the local rooms list -std Can send a to-device message to two users which both receive it using /sync -fme Outbound federation will ignore a missing event with bad JSON for room version 6 -fbk Outbound federation rejects backfill containing invalid JSON for events in room version 6 -jso Invalid JSON integers -jso Invalid JSON floats -jso Invalid JSON special values -inv Can invite users to invite-only rooms (2 subtests) -plv setting 'm.room.name' respects room powerlevel (2 subtests) -psh Messages that notify from another user increment notification_count -psh Messages that org.matrix.msc2625.mark_unread from another user increment org.matrix.msc2625.unread_count -dvk Can claim one time key using POST (2 subtests) -fdk Can query remote device keys using POST (1 subtests) -fdk Can claim remote one time key using POST (2 subtests) -fmj Inbound /make_join rejects attempts to join rooms where all users have left \ No newline at end of file diff --git a/tests/sytest/are-we-synapse-yet.py b/tests/sytest/are-we-synapse-yet.py deleted file mode 100755 index 3d21fa41..00000000 --- a/tests/sytest/are-we-synapse-yet.py +++ /dev/null @@ -1,266 +0,0 @@ -#!/usr/bin/env python3 - -from __future__ import division -import argparse -import re -import sys - -# Usage: $ ./are-we-synapse-yet.py [-v] results.tap -# This script scans a results.tap file from Dendrite's CI process and 
spits out -# a rating of how close we are to Synapse parity, based purely on SyTests. -# The main complexity is grouping tests sensibly into features like 'Registration' -# and 'Federation'. Then it just checks the ones which are passing and calculates -# percentages for each group. Produces results like: -# -# Client-Server APIs: 29% (196/666 tests) -# ------------------- -# Registration : 62% (20/32 tests) -# Login : 7% (1/15 tests) -# V1 CS APIs : 10% (3/30 tests) -# ... -# -# or in verbose mode: -# -# Client-Server APIs: 29% (196/666 tests) -# ------------------- -# Registration : 62% (20/32 tests) -# ✓ GET /register yields a set of flows -# ✓ POST /register can create a user -# ✓ POST /register downcases capitals in usernames -# ... -# -# You can also tack `-v` on to see exactly which tests each category falls under. - -test_mappings = { - "nsp": "Non-Spec API", - "unk": "Unknown API (no group specified)", - "app": "Application Services API", - "f": "Federation", # flag to mark test involves federation - - "federation_apis": { - "fky": "Key API", - "fsj": "send_join API", - "fmj": "make_join API", - "fsl": "send_leave API", - "fiv": "Invite API", - "fqu": "Query API", - "frv": "room versions", - "fau": "Auth", - "fbk": "Backfill API", - "fme": "get_missing_events API", - "fst": "State APIs", - "fpb": "Public Room API", - "fdk": "Device Key APIs", - "fed": "Federation API", - "fsd": "Send-to-Device APIs", - }, - - "client_apis": { - "reg": "Registration", - "log": "Login", - "lox": "Logout", - "v1s": "V1 CS APIs", - "csa": "Misc CS APIs", - "pro": "Profile", - "dev": "Devices", - "dvk": "Device Keys", - "dkb": "Device Key Backup", - "xsk": "Cross-signing Keys", - "pre": "Presence", - "crm": "Create Room", - "syn": "Sync API", - "rmv": "Room Versions", - "rst": "Room State APIs", - "pub": "Public Room APIs", - "mem": "Room Membership", - "ali": "Room Aliases", - "jon": "Joining Rooms", - "lev": "Leaving Rooms", - "inv": "Inviting users to Rooms", - "ban": 
"Banning users", - "snd": "Sending events", - "get": "Getting events for Rooms", - "rct": "Receipts", - "red": "Read markers", - "med": "Media APIs", - "cap": "Capabilities API", - "typ": "Typing API", - "psh": "Push APIs", - "acc": "Account APIs", - "eph": "Ephemeral Events", - "plv": "Power Levels", - "xxx": "Redaction", - "3pd": "Third-Party ID APIs", - "gst": "Guest APIs", - "ath": "Room Auth", - "fgt": "Forget APIs", - "ctx": "Context APIs", - "upg": "Room Upgrade APIs", - "tag": "Tagging APIs", - "sch": "Search APIs", - "oid": "OpenID API", - "std": "Send-to-Device APIs", - "adm": "Server Admin API", - "ign": "Ignore Users", - "udr": "User Directory APIs", - "jso": "Enforced canonical JSON", - }, -} - -# optional 'not ' with test number then anything but '#' -re_testname = re.compile(r"^(not )?ok [0-9]+ ([^#]+)") - -# Parses lines like the following: -# -# SUCCESS: ok 3 POST /register downcases capitals in usernames -# FAIL: not ok 54 (expected fail) POST /createRoom creates a room with the given version -# SKIP: ok 821 Multiple calls to /sync should not cause 500 errors # skip lack of can_post_room_receipts -# EXPECT FAIL: not ok 822 (expected fail) Guest user can call /events on another world_readable room (SYN-606) # TODO expected fail -# -# Only SUCCESS lines are treated as success, the rest are not implemented. -# -# Returns a dict like: -# { name: "...", ok: True } -def parse_test_line(line): - if not line.startswith("ok ") and not line.startswith("not ok "): - return - re_match = re_testname.match(line) - test_name = re_match.groups()[1].replace("(expected fail) ", "").strip() - test_pass = False - if line.startswith("ok ") and not "# skip " in line: - test_pass = True - return { - "name": test_name, - "ok": test_pass, - } - -# Prints the stats for a complete section. 
-# header_name => "Client-Server APIs" -# gid_to_tests => { gid: { : True|False }} -# gid_to_name => { gid: "Group Name" } -# verbose => True|False -# Produces: -# Client-Server APIs: 29% (196/666 tests) -# ------------------- -# Registration : 62% (20/32 tests) -# Login : 7% (1/15 tests) -# V1 CS APIs : 10% (3/30 tests) -# ... -# or in verbose mode: -# Client-Server APIs: 29% (196/666 tests) -# ------------------- -# Registration : 62% (20/32 tests) -# ✓ GET /register yields a set of flows -# ✓ POST /register can create a user -# ✓ POST /register downcases capitals in usernames -# ... -def print_stats(header_name, gid_to_tests, gid_to_name, verbose): - subsections = [] # Registration: 100% (13/13 tests) - subsection_test_names = {} # 'subsection name': ["✓ Test 1", "✓ Test 2", "× Test 3"] - total_passing = 0 - total_tests = 0 - for gid, tests in gid_to_tests.items(): - group_total = len(tests) - if group_total == 0: - continue - group_passing = 0 - test_names_and_marks = [] - for name, passing in tests.items(): - if passing: - group_passing += 1 - test_names_and_marks.append(f"{'✓' if passing else '×'} {name}") - - total_tests += group_total - total_passing += group_passing - pct = "{0:.0f}%".format(group_passing/group_total * 100) - line = "%s: %s (%d/%d tests)" % (gid_to_name[gid].ljust(25, ' '), pct.rjust(4, ' '), group_passing, group_total) - subsections.append(line) - subsection_test_names[line] = test_names_and_marks - - pct = "{0:.0f}%".format(total_passing/total_tests * 100) - print("%s: %s (%d/%d tests)" % (header_name, pct, total_passing, total_tests)) - print("-" * (len(header_name)+1)) - for line in subsections: - print(" %s" % (line,)) - if verbose: - for test_name_and_pass_mark in subsection_test_names[line]: - print(" %s" % (test_name_and_pass_mark,)) - print("") - print("") - -def main(results_tap_path, verbose): - # Load up test mappings - test_name_to_group_id = {} - fed_tests = set() - client_tests = set() - with 
open("./are-we-synapse-yet.list", "r") as f: - for line in f.readlines(): - test_name = " ".join(line.split(" ")[1:]).strip() - groups = line.split(" ")[0].split(",") - for gid in groups: - if gid == "f" or gid in test_mappings["federation_apis"]: - fed_tests.add(test_name) - else: - client_tests.add(test_name) - if gid == "f": - continue # we expect another group ID - test_name_to_group_id[test_name] = gid - - # parse results.tap - summary = { - "client": { - # gid: { - # test_name: OK - # } - }, - "federation": { - # gid: { - # test_name: OK - # } - }, - "appservice": { - "app": {}, - }, - "nonspec": { - "nsp": {}, - "unk": {} - }, - } - with open(results_tap_path, "r") as f: - for line in f.readlines(): - test_result = parse_test_line(line) - if not test_result: - continue - name = test_result["name"] - group_id = test_name_to_group_id.get(name) - if not group_id: - summary["nonspec"]["unk"][name] = test_result["ok"] - if group_id == "nsp": - summary["nonspec"]["nsp"][name] = test_result["ok"] - elif group_id == "app": - summary["appservice"]["app"][name] = test_result["ok"] - elif group_id in test_mappings["federation_apis"]: - group = summary["federation"].get(group_id, {}) - group[name] = test_result["ok"] - summary["federation"][group_id] = group - elif group_id in test_mappings["client_apis"]: - group = summary["client"].get(group_id, {}) - group[name] = test_result["ok"] - summary["client"][group_id] = group - - print("Are We Synapse Yet?") - print("===================") - print("") - print_stats("Non-Spec APIs", summary["nonspec"], test_mappings, verbose) - print_stats("Client-Server APIs", summary["client"], test_mappings["client_apis"], verbose) - print_stats("Federation APIs", summary["federation"], test_mappings["federation_apis"], verbose) - print_stats("Application Services APIs", summary["appservice"], test_mappings, verbose) - - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument("tap_file", help="path to 
results.tap") - parser.add_argument("-v", action="store_true", help="show individual test names in output") - args = parser.parse_args() - main(args.tap_file, args.v) \ No newline at end of file diff --git a/tests/sytest/show-expected-fail-tests.sh b/tests/sytest/show-expected-fail-tests.sh deleted file mode 100755 index 320d4ebd..00000000 --- a/tests/sytest/show-expected-fail-tests.sh +++ /dev/null @@ -1,105 +0,0 @@ -#! /bin/bash -# -# Parses a results.tap file from SyTest output and a file containing test names (a test whitelist) -# and checks whether a test name that exists in the whitelist (that should pass), failed or not. -# -# An optional blacklist file can be added, also containing test names, where if a test name is -# present, the script will not error even if the test is in the whitelist file and failed -# -# For each of these files, lines starting with '#' are ignored. -# -# Usage ./show-expected-fail-tests.sh results.tap whitelist [blacklist] - -results_file=$1 -whitelist_file=$2 -blacklist_file=$3 - -fail_build=0 - -if [ $# -lt 2 ]; then - echo "Usage: $0 results.tap whitelist [blacklist]" - exit 1 -fi - -if [ ! -f "$results_file" ]; then - echo "ERROR: Specified results file '${results_file}' doesn't exist." - fail_build=1 -fi - -if [ ! -f "$whitelist_file" ]; then - echo "ERROR: Specified test whitelist '${whitelist_file}' doesn't exist." - fail_build=1 -fi - -blacklisted_tests=() - -# Check if a blacklist file was provided -if [ $# -eq 3 ]; then - # Read test blacklist file - if [ ! -f "$blacklist_file" ]; then - echo "ERROR: Specified test blacklist file '${blacklist_file}' doesn't exist." 
- fail_build=1 - fi - - # Read each line, ignoring those that start with '#' - blacklisted_tests="" - search_non_comments=$(grep -v '^#' ${blacklist_file}) - while read -r line ; do - # Record the blacklisted test name - blacklisted_tests+=("${line}") - done <<< "${search_non_comments}" # This allows us to edit blacklisted_tests in the while loop -fi - -[ "$fail_build" = 0 ] || exit 1 - -passed_but_expected_fail=$(grep ' # TODO passed but expected fail' ${results_file} | sed -E 's/^ok [0-9]+ (\(expected fail\) )?//' | sed -E 's/( \([0-9]+ subtests\))? # TODO passed but expected fail$//') -tests_to_add="" -already_in_whitelist="" - -while read -r test_name; do - # Ignore empty lines - [ "${test_name}" = "" ] && continue - - grep "^${test_name}" "${whitelist_file}" > /dev/null 2>&1 - if [ "$?" != "0" ]; then - # Check if this test name is blacklisted - if printf '%s\n' "${blacklisted_tests[@]}" | grep -q -P "^${test_name}$"; then - # Don't notify about this test - continue - fi - - # Append this test_name to the existing list - tests_to_add="${tests_to_add}${test_name}\n" - fail_build=1 - else - already_in_whitelist="${already_in_whitelist}${test_name}\n" - fi -done <<< "${passed_but_expected_fail}" - -# TODO: Check that the same test doesn't exist in both the whitelist and blacklist -# TODO: Check that the same test doesn't appear twice in the whitelist|blacklist - -# Trim test output strings -tests_to_add=$(IFS=$'\n' echo "${tests_to_add[*]%%'\n'}") -already_in_whitelist=$(IFS=$'\n' echo "${already_in_whitelist[*]%%'\n'}") - -# Format output with markdown for buildkite annotation rendering purposes -if [ -n "${tests_to_add}" ] && [ -n "${already_in_whitelist}" ]; then - echo "### 📜 SyTest Whitelist Maintenance" -fi - -if [ -n "${tests_to_add}" ]; then - echo "**ERROR**: The following tests passed but are not present in \`$2\`. 
Please append them to the file:" - echo "\`\`\`" - echo -e "${tests_to_add}" - echo "\`\`\`" -fi - -if [ -n "${already_in_whitelist}" ]; then - echo "**WARN**: Tests in the whitelist still marked as **expected fail**:" - echo "\`\`\`" - echo -e "${already_in_whitelist}" - echo "\`\`\`" -fi - -exit ${fail_build} diff --git a/tests/sytest/sytest-blacklist b/tests/sytest/sytest-blacklist deleted file mode 100644 index 009de225..00000000 --- a/tests/sytest/sytest-blacklist +++ /dev/null @@ -1,7 +0,0 @@ -# This test checks for a room-alias key in the response which is not in the spec, we must add it back in whitelist when https://github.com/matrix-org/sytest/pull/880 is merged -POST /createRoom makes a public room -# These fails because they use a endpoint which is not in the spec, we must add them back in whitelist when https://github.com/matrix-org/sytest/issues/878 is closed -POST /createRoom makes a room with a name -POST /createRoom makes a room with a topic -Can /sync newly created room -POST /createRoom ignores attempts to set the room version via creation_content \ No newline at end of file diff --git a/tests/sytest/sytest-whitelist b/tests/sytest/sytest-whitelist deleted file mode 100644 index 1c969dba..00000000 --- a/tests/sytest/sytest-whitelist +++ /dev/null @@ -1,516 +0,0 @@ -/event/ does not allow access to events before the user joined -/event/ on joined room works -/event/ on non world readable room does not work -/joined_members return joined members -/joined_rooms returns only joined rooms -/whois -3pid invite join valid signature but revoked keys are rejected -3pid invite join valid signature but unreachable ID server are rejected -3pid invite join with wrong but valid signature are rejected -A change to displayname should appear in incremental /sync -A full_state incremental update returns all state -A full_state incremental update returns only recent timeline -A message sent after an initial sync appears in the timeline of an incremental sync. 
-A next_batch token can be used in the v1 messages API -A pair of events which redact each other should be ignored -A pair of servers can establish a join in a v2 room -A prev_batch token can be used in the v1 messages API -AS can create a user -AS can create a user with an underscore -AS can create a user with inhibit_login -AS can set avatar for ghosted users -AS can set displayname for ghosted users -AS can't set displayname for random users -AS cannot create users outside its own namespace -AS user (not ghost) can join room without registering -AS user (not ghost) can join room without registering, with user_id query param -After changing password, a different session no longer works by default -After changing password, can log in with new password -After changing password, can't log in with old password -After changing password, different sessions can optionally be kept -After changing password, existing session still works -After deactivating account, can't log in with an email -After deactivating account, can't log in with password -Alias creators can delete alias with no ops -Alias creators can delete canonical alias with no ops -Alternative server names do not cause a routing loop -An event which redacts an event in a different room should be ignored -An event which redacts itself should be ignored -Asking for a remote rooms list, but supplying the local server's name, returns the local rooms list -Backfill checks the events requested belong to the room -Backfill works correctly with history visibility set to joined -Backfilled events whose prev_events are in a different room do not allow cross-room back-pagination -Banned servers cannot /event_auth -Banned servers cannot /invite -Banned servers cannot /make_join -Banned servers cannot /make_leave -Banned servers cannot /send_join -Banned servers cannot /send_leave -Banned servers cannot backfill -Banned servers cannot get missing events -Banned servers cannot get room state -Banned servers cannot get room 
state ids -Banned servers cannot send events -Banned user is kicked and may not rejoin until unbanned -Both GET and PUT work -Can /sync newly created room -Can add account data -Can add account data to room -Can add tag -Can claim one time key using POST -Can claim remote one time key using POST -Can create filter -Can deactivate account -Can delete canonical alias -Can download file 'ascii' -Can download file 'name with spaces' -Can download file 'name;with;semicolons' -Can download filter -Can download specifying a different ASCII file name -Can download specifying a different Unicode file name -Can download with Unicode file name locally -Can download with Unicode file name over federation -Can download without a file name locally -Can download without a file name over federation -Can forget room you've been kicked from -Can get 'm.room.name' state for a departed room (SPEC-216) -Can get account data without syncing -Can get remote public room list -Can get room account data without syncing -Can get rooms/{roomId}/members -Can get rooms/{roomId}/members for a departed room (SPEC-216) -Can get rooms/{roomId}/state for a departed room (SPEC-216) -Can invite users to invite-only rooms -Can list tags for a room -Can logout all devices -Can logout current device -Can paginate public room list -Can pass a JSON filter as a query parameter -Can query device keys using POST -Can query remote device keys using POST -Can query specific device keys using POST -Can re-join room if re-invited -Can read configuration endpoint -Can receive redactions from regular users over federation in room version 1 -Can receive redactions from regular users over federation in room version 2 -Can receive redactions from regular users over federation in room version 3 -Can receive redactions from regular users over federation in room version 4 -Can receive redactions from regular users over federation in room version 5 -Can receive redactions from regular users over federation in room version 
6 -Can recv a device message using /sync -Can recv a device message using /sync -Can recv device messages over federation -Can recv device messages until they are acknowledged -Can recv device messages until they are acknowledged -Can reject invites over federation for rooms with version 1 -Can reject invites over federation for rooms with version 2 -Can reject invites over federation for rooms with version 3 -Can reject invites over federation for rooms with version 4 -Can reject invites over federation for rooms with version 5 -Can reject invites over federation for rooms with version 6 -Can remove tag -Can search public room list -Can send a message directly to a device using PUT /sendToDevice -Can send a message directly to a device using PUT /sendToDevice -Can send a to-device message to two users which both receive it using /sync -Can send image in room message -Can send messages with a wildcard device id -Can send messages with a wildcard device id -Can send messages with a wildcard device id to two devices -Can send messages with a wildcard device id to two devices -Can sync -Can sync a joined room -Can sync a room with a message with a transaction id -Can sync a room with a single message -Can upload device keys -Can upload with ASCII file name -Can upload with Unicode file name -Can upload without a file name -Can't deactivate account with wrong password -Can't forget room you're still in -Changes to state are included in an gapped incremental sync -Changes to state are included in an incremental sync -Changing the actions of an unknown default rule fails with 404 -Changing the actions of an unknown rule fails with 404 -Checking local federation server -Creators can delete alias -Current state appears in timeline in private history -Current state appears in timeline in private history with many messages before -DELETE /device/{deviceId} -DELETE /device/{deviceId} requires UI auth user to match device owner -DELETE /device/{deviceId} with no body gives a 
401 -Deleted tags appear in an incremental v2 /sync -Deleting a non-existent alias should return a 404 -Device list doesn't change if remote server is down -Device messages over federation wake up /sync -Device messages wake up /sync -Device messages wake up /sync -Device messages with the same txn_id are deduplicated -Device messages with the same txn_id are deduplicated -Enabling an unknown default rule fails with 404 -Event size limits -Event with an invalid signature in the send_join response should not cause room join to fail -Events come down the correct room -Events whose auth_events are in the wrong room do not mess up the room state -Existing members see new members' join events -Federation key API allows unsigned requests for keys -Federation key API can act as a notary server via a GET request -Federation key API can act as a notary server via a POST request -Federation rejects inbound events where the prev_events cannot be found -Fetching eventstream a second time doesn't yield the message again -Forgetting room does not show up in v2 /sync -Full state sync includes joined rooms -GET /capabilities is present and well formed for registered user -GET /device/{deviceId} -GET /device/{deviceId} gives a 404 for unknown devices -GET /devices -GET /directory/room/:room_alias yields room ID -GET /events initially -GET /events with negative 'limit' -GET /events with non-numeric 'limit' -GET /events with non-numeric 'timeout' -GET /initialSync initially -GET /joined_rooms lists newly-created room -GET /login yields a set of flows -GET /media/r0/download can fetch the value again -GET /profile/:user_id/avatar_url publicly accessible -GET /profile/:user_id/displayname publicly accessible -GET /publicRooms includes avatar URLs -GET /publicRooms lists newly-created room -GET /publicRooms lists rooms -GET /r0/capabilities is not public -GET /register yields a set of flows -GET /rooms/:room_id/joined_members fetches my membership -GET /rooms/:room_id/messages returns a 
message -GET /rooms/:room_id/state fetches entire room state -GET /rooms/:room_id/state/m.room.member/:user_id fetches my membership -GET /rooms/:room_id/state/m.room.member/:user_id?format=event fetches my membership event -GET /rooms/:room_id/state/m.room.name gets name -GET /rooms/:room_id/state/m.room.power_levels can fetch levels -GET /rooms/:room_id/state/m.room.power_levels fetches powerlevels -GET /rooms/:room_id/state/m.room.topic gets topic -Get left notifs for other users in sync and /keys/changes when user leaves -Getting messages going forward is limited for a departed room (SPEC-216) -Getting push rules doesn't corrupt the cache SYN-390 -Getting state IDs checks the events requested belong to the room -Getting state checks the events requested belong to the room -Ghost user must register before joining room -Guest non-joined user cannot call /events on default room -Guest non-joined user cannot call /events on invited room -Guest non-joined user cannot call /events on joined room -Guest non-joined user cannot call /events on shared room -Guest non-joined users can get individual state for world_readable rooms -Guest non-joined users can get individual state for world_readable rooms after leaving -Guest non-joined users can get state for world_readable rooms -Guest non-joined users cannot room initalSync for non-world_readable rooms -Guest non-joined users cannot send messages to guest_access rooms if not joined -Guest user can set display names -Guest user cannot call /events globally -Guest user cannot upgrade other users -Guest users can accept invites to private rooms over federation -Guest users can join guest_access rooms -Guest users can send messages to guest_access rooms if joined -If a device list update goes missing, the server resyncs on the next one -If remote user leaves room we no longer receive device updates -If remote user leaves room, changes device and rejoins we see update in /keys/changes -If remote user leaves room, changes 
device and rejoins we see update in sync -Inbound /make_join rejects attempts to join rooms where all users have left -Inbound /v1/make_join rejects remote attempts to join local users to rooms -Inbound /v1/send_join rejects incorrectly-signed joins -Inbound /v1/send_join rejects joins from other servers -Inbound /v1/send_leave rejects leaves from other servers -Inbound federation accepts a second soft-failed event -Inbound federation accepts attempts to join v2 rooms from servers with support -Inbound federation can backfill events -Inbound federation can get public room list -Inbound federation can get state for a room -Inbound federation can get state_ids for a room -Inbound federation can query profile data -Inbound federation can query room alias directory -Inbound federation can receive events -Inbound federation can receive invites via v1 API -Inbound federation can receive invites via v2 API -Inbound federation can receive redacted events -Inbound federation can receive v1 /send_join -Inbound federation can receive v2 /send_join -Inbound federation can return events -Inbound federation can return missing events for invite visibility -Inbound federation can return missing events for world_readable visibility -Inbound federation correctly soft fails events -Inbound federation of state requires event_id as a mandatory paramater -Inbound federation of state_ids requires event_id as a mandatory paramater -Inbound federation rejects attempts to join v1 rooms from servers without v1 support -Inbound federation rejects attempts to join v2 rooms from servers lacking version support -Inbound federation rejects attempts to join v2 rooms from servers only supporting v1 -Inbound federation rejects invite rejections which include invalid JSON for room version 6 -Inbound federation rejects invites which include invalid JSON for room version 6 -Inbound federation rejects receipts from wrong remote -Inbound federation rejects remote attempts to join local users to rooms 
-Inbound federation rejects remote attempts to kick local users to rooms -Inbound federation rejects typing notifications from wrong remote -Inbound: send_join rejects invalid JSON for room version 6 -Invalid JSON floats -Invalid JSON integers -Invalid JSON special values -Invited user can reject invite -Invited user can reject invite over federation -Invited user can reject invite over federation for empty room -Invited user can reject invite over federation several times -Invited user can see room metadata -Inviting an AS-hosted user asks the AS server -Lazy loading parameters in the filter are strictly boolean -Left rooms appear in the leave section of full state sync -Local delete device changes appear in v2 /sync -Local device key changes appear in /keys/changes -Local device key changes appear in v2 /sync -Local device key changes get to remote servers -Local new device changes appear in v2 /sync -Local non-members don't see posted message events -Local room members can get room messages -Local room members see posted message events -Local update device changes appear in v2 /sync -Local users can peek by room alias -Local users can peek into world_readable rooms by room ID -Message history can be paginated -Message history can be paginated over federation -Name/topic keys are correct -New account data appears in incremental v2 /sync -New read receipts appear in incremental v2 /sync -New room members see their own join event -New users appear in /keys/changes -Newly banned rooms appear in the leave section of incremental sync -Newly joined room is included in an incremental sync -Newly joined room is included in an incremental sync after invite -Newly left rooms appear in the leave section of gapped sync -Newly left rooms appear in the leave section of incremental sync -Newly updated tags appear in an incremental v2 /sync -Non-numeric ports in server names are rejected -Outbound federation can backfill events -Outbound federation can query profile data 
-Outbound federation can query room alias directory -Outbound federation can query v1 /send_join -Outbound federation can query v2 /send_join -Outbound federation can request missing events -Outbound federation can send events -Outbound federation can send invites via v1 API -Outbound federation can send invites via v2 API -Outbound federation can send room-join requests -Outbound federation correctly handles unsupported room versions -Outbound federation passes make_join failures through to the client -Outbound federation rejects backfill containing invalid JSON for events in room version 6 -Outbound federation rejects m.room.create events with an unknown room version -Outbound federation rejects send_join responses with no m.room.create event -Outbound federation sends receipts -Outbound federation will ignore a missing event with bad JSON for room version 6 -POST /createRoom creates a room with the given version -POST /createRoom ignores attempts to set the room version via creation_content -POST /createRoom makes a private room -POST /createRoom makes a private room with invites -POST /createRoom makes a public room -POST /createRoom makes a room with a name -POST /createRoom makes a room with a topic -POST /createRoom rejects attempts to create rooms with numeric versions -POST /createRoom rejects attempts to create rooms with unknown versions -POST /createRoom with creation content -POST /join/:room_alias can join a room -POST /join/:room_alias can join a room with custom content -POST /join/:room_id can join a room -POST /join/:room_id can join a room with custom content -POST /login as non-existing user is rejected -POST /login can log in as a user -POST /login can log in as a user with just the local part of the id -POST /login returns the same device_id as that in the request -POST /login wrong password is rejected -POST /media/r0/upload can create an upload -POST /redact disallows redaction of event in different room -POST /register allows registration 
of usernames with '-' -POST /register allows registration of usernames with '.' -POST /register allows registration of usernames with '/' -POST /register allows registration of usernames with '3' -POST /register allows registration of usernames with '=' -POST /register allows registration of usernames with '_' -POST /register allows registration of usernames with 'q' -POST /register can create a user -POST /register downcases capitals in usernames -POST /register rejects registration of usernames with '!' -POST /register rejects registration of usernames with '"' -POST /register rejects registration of usernames with ''' -POST /register rejects registration of usernames with ':' -POST /register rejects registration of usernames with '?' -POST /register rejects registration of usernames with '@' -POST /register rejects registration of usernames with '[' -POST /register rejects registration of usernames with '\' -POST /register rejects registration of usernames with '\n' -POST /register rejects registration of usernames with ']' -POST /register rejects registration of usernames with '{' -POST /register rejects registration of usernames with '|' -POST /register rejects registration of usernames with '}' -POST /register rejects registration of usernames with '£' -POST /register rejects registration of usernames with 'é' -POST /register returns the same device_id as that in the request -POST /rooms/:room_id/ban can ban a user -POST /rooms/:room_id/invite can send an invite -POST /rooms/:room_id/join can join a room -POST /rooms/:room_id/leave can leave a room -POST /rooms/:room_id/read_markers can create read marker -POST /rooms/:room_id/receipt can create receipts -POST /rooms/:room_id/redact/:event_id as original message sender redacts message -POST /rooms/:room_id/redact/:event_id as power user redacts message -POST /rooms/:room_id/redact/:event_id as random user does not redact message -POST /rooms/:room_id/send/:event_type sends a message -POST 
/rooms/:room_id/state/m.room.name sets name -POST /rooms/:room_id/state/m.room.topic sets topic -POST /rooms/:room_id/upgrade can upgrade a room version -POST rejects invalid utf-8 in JSON -POSTed media can be thumbnailed -PUT /device/{deviceId} gives a 404 for unknown devices -PUT /device/{deviceId} updates device fields -PUT /directory/room/:room_alias creates alias -PUT /profile/:user_id/avatar_url sets my avatar -PUT /profile/:user_id/displayname sets my name -PUT /rooms/:room_id/send/:event_type/:txn_id deduplicates the same txn id -PUT /rooms/:room_id/send/:event_type/:txn_id sends a message -PUT /rooms/:room_id/state/m.room.power_levels can set levels -PUT /rooms/:room_id/typing/:user_id sets typing notification -PUT power_levels should not explode if the old power levels were empty -Peeked rooms only turn up in the sync for the device who peeked them -Previously left rooms don't appear in the leave section of sync -Push rules come down in an initial /sync -Read markers appear in incremental v2 /sync -Read markers appear in initial v2 /sync -Read markers can be updated -Read receipts appear in initial v2 /sync -Real non-joined user cannot call /events on default room -Real non-joined user cannot call /events on invited room -Real non-joined user cannot call /events on joined room -Real non-joined user cannot call /events on shared room -Real non-joined users can get individual state for world_readable rooms -Real non-joined users can get individual state for world_readable rooms after leaving -Real non-joined users can get state for world_readable rooms -Real non-joined users cannot room initalSync for non-world_readable rooms -Real non-joined users cannot send messages to guest_access rooms if not joined -Receipts must be m.read -Redaction of a redaction redacts the redaction reason -Regular users can add and delete aliases in the default room configuration -Regular users can add and delete aliases when m.room.aliases is restricted -Regular users cannot 
create room aliases within the AS namespace -Regular users cannot register within the AS namespace -Remote media can be thumbnailed -Remote room alias queries can handle Unicode -Remote room members also see posted message events -Remote room members can get room messages -Remote user can backfill in a room with version 1 -Remote user can backfill in a room with version 2 -Remote user can backfill in a room with version 3 -Remote user can backfill in a room with version 4 -Remote user can backfill in a room with version 5 -Remote user can backfill in a room with version 6 -Remote users can join room by alias -Remote users may not join unfederated rooms -Request to logout with invalid an access token is rejected -Request to logout without an access token is rejected -Room aliases can contain Unicode -Room creation reports m.room.create to myself -Room creation reports m.room.member to myself -Room members can join a room with an overridden displayname -Room members can override their displayname on a room-specific basis -Room state at a rejected message event is the same as its predecessor -Room state at a rejected state event is the same as its predecessor -Rooms a user is invited to appear in an incremental sync -Rooms a user is invited to appear in an initial sync -Rooms can be created with an initial invite list (SYN-205) -Server correctly handles incoming m.device_list_update -Server correctly handles transactions that break edu limits -Server correctly resyncs when client query keys and there is no remote cache -Server correctly resyncs when server leaves and rejoins a room -Server rejects invalid JSON in a version 6 room -Setting room topic reports m.room.topic to myself -Should not be able to take over the room by pretending there is no PL event -Should reject keys claiming to belong to a different user -State from remote users is included in the state in the initial sync -State from remote users is included in the timeline in an incremental sync -State is 
included in the timeline in the initial sync -Sync can be polled for updates -Sync is woken up for leaves -Syncing a new room with a large timeline limit isn't limited -Tags appear in an initial v2 /sync -Trying to get push rules with unknown rule_id fails with 404 -Typing can be explicitly stopped -Typing events appear in gapped sync -Typing events appear in incremental sync -Typing events appear in initial sync -Typing notification sent to local room members -Typing notifications also sent to remote room members -Typing notifications don't leak -Uninvited users cannot join the room -Unprivileged users can set m.room.topic if it only needs level 0 -User appears in user directory -User in private room doesn't appear in user directory -User joining then leaving public room appears and dissappears from directory -User in shared private room does appear in user directory until leave -User can create and send/receive messages in a room with version 1 -User can create and send/receive messages in a room with version 2 -User can create and send/receive messages in a room with version 3 -User can create and send/receive messages in a room with version 4 -User can create and send/receive messages in a room with version 5 -User can create and send/receive messages in a room with version 6 -User can invite local user to room with version 1 -User can invite local user to room with version 2 -User can invite local user to room with version 3 -User can invite local user to room with version 4 -User can invite local user to room with version 5 -User can invite local user to room with version 6 -User can invite remote user to room with version 1 -User can invite remote user to room with version 2 -User can invite remote user to room with version 3 -User can invite remote user to room with version 4 -User can invite remote user to room with version 5 -User can invite remote user to room with version 6 -User directory correctly update on display name change -User in dir while user 
still shares private rooms -User in shared private room does appear in user directory -User is offline if they set_presence=offline in their sync -User signups are forbidden from starting with '_' -Users can't delete other's aliases -Users cannot invite a user that is already in the room -Users cannot invite themselves to a room -Users cannot kick users from a room they are not in -Users cannot kick users who have already left a room -Users cannot set ban powerlevel higher than their own -Users cannot set kick powerlevel higher than their own -Users cannot set notifications powerlevel higher than their own -Users cannot set redact powerlevel higher than their own -Users receive device_list updates for their own devices -Users with sufficient power-level can delete other's aliases -Version responds 200 OK with valid structure -We can't peek into rooms with invited history_visibility -We can't peek into rooms with joined history_visibility -We can't peek into rooms with shared history_visibility -We don't send redundant membership state across incremental syncs by default -We should see our own leave event when rejecting an invite, even if history_visibility is restricted (riot-web/3462) -We should see our own leave event, even if history_visibility is restricted (SYN-662) -Wildcard device messages over federation wake up /sync -Wildcard device messages wake up /sync -Wildcard device messages wake up /sync -avatar_url updates affect room member events -displayname updates affect room member events -local user can join room with version 1 -local user can join room with version 2 -local user can join room with version 3 -local user can join room with version 4 -local user can join room with version 5 -local user can join room with version 6 -m.room.history_visibility == "joined" allows/forbids appropriately for Guest users -m.room.history_visibility == "joined" allows/forbids appropriately for Real users -m.room.history_visibility == "world_readable" allows/forbids 
appropriately for Guest users -m.room.history_visibility == "world_readable" allows/forbids appropriately for Real users -query for user with no keys returns empty key dict -remote user can join room with version 1 -remote user can join room with version 2 -remote user can join room with version 3 -remote user can join room with version 4 -remote user can join room with version 5 -remote user can join room with version 6 -setting 'm.room.name' respects room powerlevel -setting 'm.room.power_levels' respects room powerlevel -Federation publicRoom Name/topic keys are correct diff --git a/tests/test-config.toml b/tests/test-config.toml deleted file mode 100644 index c4666878..00000000 --- a/tests/test-config.toml +++ /dev/null @@ -1,15 +0,0 @@ -[global] - -# Server runs in same container as tests do, so localhost is fine -server_name = "localhost" - -# With a bit of luck /tmp is a RAM disk, so that the file system does not become the bottleneck while testing -database_path = "/tmp" - -# All the other settings are left at their defaults: -port = 6167 -max_request_size = 20_000_000 -allow_registration = true -trusted_servers = ["matrix.org"] -address = "127.0.0.1" -proxy = "none" \ No newline at end of file