diff --git a/.ci/scripts/test_old_deps.sh b/.ci/scripts/test_old_deps.sh index 7d0625fa86f7..478c8d639ae8 100755 --- a/.ci/scripts/test_old_deps.sh +++ b/.ci/scripts/test_old_deps.sh @@ -69,7 +69,7 @@ with open('pyproject.toml', 'w') as f: " python3 -c "$REMOVE_DEV_DEPENDENCIES" -pipx install poetry==1.1.12 +pipx install poetry==1.1.14 ~/.local/bin/poetry lock echo "::group::Patched pyproject.toml" diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 50d28c68eeb8..c3638c35eb14 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -1,3 +1,16 @@ +# Commits in this file will be removed from GitHub blame results. +# +# To use this file locally, use: +# git blame --ignore-revs-file="path/to/.git-blame-ignore-revs" +# +# or configure the `blame.ignoreRevsFile` option in your git config. +# +# If ignoring a pull request that was not squash merged, only the merge +# commit needs to be put here. Child commits will be resolved from it. + +# Run black (#3679). +8b3d9b6b199abb87246f982d5db356f1966db925 + # Black reformatting (#5482). 32e7c9e7f20b57dd081023ac42d6931a8da9b3a3 diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 4bc29c820759..c8b033e8a473 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -328,9 +328,6 @@ jobs: - arrangement: monolith database: Postgres - - arrangement: workers - database: Postgres - steps: - name: Run actions/checkout@v2 for synapse uses: actions/checkout@v2 @@ -346,6 +343,30 @@ jobs: shell: bash name: Run Complement Tests + # XXX When complement with workers is stable, move this back into the standard + # "complement" matrix above. + # + # See https://github.com/matrix-org/synapse/issues/13161 + complement-workers: + if: "${{ !failure() && !cancelled() }}" + needs: linting-done + runs-on: ubuntu-latest + + steps: + - name: Run actions/checkout@v2 for synapse + uses: actions/checkout@v2 + with: + path: synapse + + - name: Prepare Complement's Prerequisites + run: synapse/.ci/scripts/setup_complement_prerequisites.sh + + - run: | + set -o pipefail + POSTGRES=1 WORKERS=1 COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt + shell: bash + name: Run Complement Tests + # a job which marks all the other jobs as complete, thus allowing PRs to be merged. tests-done: if: ${{ always() }} diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index f35e82297f70..dd8e6fbb1cc9 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -127,12 +127,12 @@ jobs: run: | set -x DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx - pipx install poetry==1.1.12 + pipx install poetry==1.1.14 poetry remove -n twisted poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk poetry lock --no-update - # NOT IN 1.1.12 poetry lock --check + # NOT IN 1.1.14 poetry lock --check working-directory: synapse - run: | diff --git a/CHANGES.md b/CHANGES.md index 7eac6131b123..0e69f25e0e5c 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,125 @@ +Synapse 1.64.0 (2022-08-02) +=========================== + +No significant changes since 1.64.0rc2. + + +Deprecation Warning +------------------- + +Synapse v1.66.0 will remove the ability to delegate the tasks of verifying email address ownership, and password reset confirmation, to an identity server. 
+ +If you require your homeserver to verify e-mail addresses or to support password resets via e-mail, please configure your homeserver with SMTP access so that it can send e-mails on its own behalf. +[Consult the configuration documentation for more information.](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#email) + + +Synapse 1.64.0rc2 (2022-07-29) +============================== + +This RC reintroduces support for `account_threepid_delegates.email`, which was removed in 1.64.0rc1. It remains deprecated and will be removed altogether in Synapse v1.66.0. ([\#13406](https://github.com/matrix-org/synapse/issues/13406)) + + +Synapse 1.64.0rc1 (2022-07-26) +============================== + +This RC removed the ability to delegate the tasks of verifying email address ownership, and password reset confirmation, to an identity server. + +We have also stopped building `.deb` packages for Ubuntu 21.10 as it is no longer an active version of Ubuntu. + + +Features +-------- + +- Improve error messages when media thumbnails cannot be served. ([\#13038](https://github.com/matrix-org/synapse/issues/13038)) +- Allow pagination from remote event after discovering it from [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event`. ([\#13205](https://github.com/matrix-org/synapse/issues/13205)) +- Add a `room_type` field in the responses for the list room and room details admin APIs. Contributed by @andrewdoh. ([\#13208](https://github.com/matrix-org/synapse/issues/13208)) +- Add support for room version 10. ([\#13220](https://github.com/matrix-org/synapse/issues/13220)) +- Add per-room rate limiting for room joins. For each room, Synapse now monitors the rate of join events in that room, and throttles additional joins if that rate grows too large. ([\#13253](https://github.com/matrix-org/synapse/issues/13253), [\#13254](https://github.com/matrix-org/synapse/issues/13254), [\#13255](https://github.com/matrix-org/synapse/issues/13255), [\#13276](https://github.com/matrix-org/synapse/issues/13276)) +- Support Implicit TLS (TLS without using a STARTTLS upgrade, typically on port 465) for sending emails, enabled by the new option `force_tls`. Contributed by Jan Schär. ([\#13317](https://github.com/matrix-org/synapse/issues/13317)) + + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.15.0 where adding a user through the Synapse Admin API with a phone number would fail if the `enable_email_notifs` and `email_notifs_for_new_users` options were enabled. Contributed by @thomasweston12. ([\#13263](https://github.com/matrix-org/synapse/issues/13263)) +- Fix a bug introduced in Synapse 1.40.0 where a user invited to a restricted room would be briefly unable to join. ([\#13270](https://github.com/matrix-org/synapse/issues/13270)) +- Fix a long-standing bug where, in rare instances, Synapse could store the incorrect state for a room after a state resolution. ([\#13278](https://github.com/matrix-org/synapse/issues/13278)) +- Fix a bug introduced in v1.18.0 where the `synapse_pushers` metric would overcount pushers when they are replaced. ([\#13296](https://github.com/matrix-org/synapse/issues/13296)) +- Disable autocorrection and autocapitalisation on the username text field shown during registration when using SSO. ([\#13350](https://github.com/matrix-org/synapse/issues/13350)) +- Update locked version of `frozendict` to 2.3.3, which has fixes for memory leaks affecting `/sync`. 
([\#13284](https://github.com/matrix-org/synapse/issues/13284), [\#13352](https://github.com/matrix-org/synapse/issues/13352)) + + +Improved Documentation +---------------------- + +- Provide an example of using the Admin API. Contributed by @jejo86. ([\#13231](https://github.com/matrix-org/synapse/issues/13231)) +- Move the documentation for how URL previews work to the URL preview module. ([\#13233](https://github.com/matrix-org/synapse/issues/13233), [\#13261](https://github.com/matrix-org/synapse/issues/13261)) +- Add another `contrib` script to help set up worker processes. Contributed by @villepeh. ([\#13271](https://github.com/matrix-org/synapse/issues/13271)) +- Document that certain config options were added or changed in Synapse 1.62. Contributed by @behrmann. ([\#13314](https://github.com/matrix-org/synapse/issues/13314)) +- Document the new `rc_invites.per_issuer` throttling option added in Synapse 1.63. ([\#13333](https://github.com/matrix-org/synapse/issues/13333)) +- Mention that BuildKit is needed when building Docker images for tests. ([\#13338](https://github.com/matrix-org/synapse/issues/13338)) +- Improve Caddy reverse proxy documentation. ([\#13344](https://github.com/matrix-org/synapse/issues/13344)) + + +Deprecations and Removals +------------------------- + +- Drop tables that were formerly used for groups/communities. ([\#12967](https://github.com/matrix-org/synapse/issues/12967)) +- Drop support for delegating email verification to an external server. ([\#13192](https://github.com/matrix-org/synapse/issues/13192)) +- Drop support for calling `/_matrix/client/v3/account/3pid/bind` without an `id_access_token`, which was not permitted by the spec. Contributed by @Vetchu. ([\#13239](https://github.com/matrix-org/synapse/issues/13239)) +- Stop building `.deb` packages for Ubuntu 21.10 (Impish Indri), which has reached end of life. ([\#13326](https://github.com/matrix-org/synapse/issues/13326)) + + +Internal Changes +---------------- + +- Use lower transaction isolation level when purging rooms to avoid serialization errors. Contributed by Nick @ Beeper. ([\#12942](https://github.com/matrix-org/synapse/issues/12942)) +- Remove code which incorrectly attempted to reconcile state with remote servers when processing incoming events. ([\#12943](https://github.com/matrix-org/synapse/issues/12943)) +- Make the AS login method call `Auth.get_user_by_req` for checking the AS token. ([\#13094](https://github.com/matrix-org/synapse/issues/13094)) +- Always use a version of canonicaljson that supports the C implementation of frozendict. ([\#13172](https://github.com/matrix-org/synapse/issues/13172)) +- Add prometheus counters for ephemeral events and to device messages pushed to app services. Contributed by Brad @ Beeper. ([\#13175](https://github.com/matrix-org/synapse/issues/13175)) +- Refactor receipts servlet logic to avoid duplicated code. ([\#13198](https://github.com/matrix-org/synapse/issues/13198)) +- Preparation for database schema simplifications: populate `state_key` and `rejection_reason` for existing rows in the `events` table. ([\#13215](https://github.com/matrix-org/synapse/issues/13215)) +- Remove unused database table `event_reference_hashes`. ([\#13218](https://github.com/matrix-org/synapse/issues/13218)) +- Further reduce queries used sending events when creating new rooms. Contributed by Nick @ Beeper (@fizzadar). ([\#13224](https://github.com/matrix-org/synapse/issues/13224)) +- Call the v2 identity service `/3pid/unbind` endpoint, rather than v1. 
Contributed by @Vetchu. ([\#13240](https://github.com/matrix-org/synapse/issues/13240)) +- Use an asynchronous cache wrapper for the get event cache. Contributed by Nick @ Beeper (@fizzadar). ([\#13242](https://github.com/matrix-org/synapse/issues/13242), [\#13308](https://github.com/matrix-org/synapse/issues/13308)) +- Optimise federation sender and appservice pusher event stream processing queries. Contributed by Nick @ Beeper (@fizzadar). ([\#13251](https://github.com/matrix-org/synapse/issues/13251)) +- Log the stack when waiting for an entire room to be un-partial stated. ([\#13257](https://github.com/matrix-org/synapse/issues/13257)) +- Fix spurious warning when fetching state after a missing prev event. ([\#13258](https://github.com/matrix-org/synapse/issues/13258)) +- Clean-up tests for notifications. ([\#13260](https://github.com/matrix-org/synapse/issues/13260)) +- Do not fail build if complement with workers fails. ([\#13266](https://github.com/matrix-org/synapse/issues/13266)) +- Don't pull out state in `compute_event_context` for unconflicted state. ([\#13267](https://github.com/matrix-org/synapse/issues/13267), [\#13274](https://github.com/matrix-org/synapse/issues/13274)) +- Reduce the rebuild time for the complement-synapse docker image. ([\#13279](https://github.com/matrix-org/synapse/issues/13279)) +- Don't pull out the full state when creating an event. ([\#13281](https://github.com/matrix-org/synapse/issues/13281), [\#13307](https://github.com/matrix-org/synapse/issues/13307)) +- Upgrade from Poetry 1.1.12 to 1.1.14, to fix bugs when locking packages. ([\#13285](https://github.com/matrix-org/synapse/issues/13285)) +- Make `DictionaryCache` expire full entries if they haven't been queried in a while, even if specific keys have been queried recently. ([\#13292](https://github.com/matrix-org/synapse/issues/13292)) +- Use `HTTPStatus` constants in place of literals in tests. ([\#13297](https://github.com/matrix-org/synapse/issues/13297)) +- Improve performance of query `_get_subset_users_in_room_with_profiles`. ([\#13299](https://github.com/matrix-org/synapse/issues/13299)) +- Up batch size of `bulk_get_push_rules` and `_get_joined_profiles_from_event_ids`. ([\#13300](https://github.com/matrix-org/synapse/issues/13300)) +- Remove unnecessary `json.dumps` from tests. ([\#13303](https://github.com/matrix-org/synapse/issues/13303)) +- Reduce memory usage of sending dummy events. ([\#13310](https://github.com/matrix-org/synapse/issues/13310)) +- Prevent formatting changes of [#3679](https://github.com/matrix-org/synapse/pull/3679) from appearing in `git blame`. ([\#13311](https://github.com/matrix-org/synapse/issues/13311)) +- Change `get_users_in_room` and `get_rooms_for_user` caches to enable pruning of old entries. ([\#13313](https://github.com/matrix-org/synapse/issues/13313)) +- Validate federation destinations and log an error if a destination is invalid. ([\#13318](https://github.com/matrix-org/synapse/issues/13318)) +- Fix `FederationClient.get_pdu()` returning events from the cache as `outliers` instead of original events we saw over federation. ([\#13320](https://github.com/matrix-org/synapse/issues/13320)) +- Reduce memory usage of state caches. ([\#13323](https://github.com/matrix-org/synapse/issues/13323)) +- Reduce the amount of state we store in the `state_cache`. ([\#13324](https://github.com/matrix-org/synapse/issues/13324)) +- Add missing type hints to open tracing module. 
([\#13328](https://github.com/matrix-org/synapse/issues/13328), [\#13345](https://github.com/matrix-org/synapse/issues/13345), [\#13362](https://github.com/matrix-org/synapse/issues/13362)) +- Remove old base slaved store and de-duplicate cache ID generators. Contributed by Nick @ Beeper (@fizzadar). ([\#13329](https://github.com/matrix-org/synapse/issues/13329), [\#13349](https://github.com/matrix-org/synapse/issues/13349)) +- When reporting metrics is enabled, use ~8x less data to describe DB transaction metrics. ([\#13342](https://github.com/matrix-org/synapse/issues/13342)) +- Faster room joins: skip soft fail checks while Synapse only has partial room state, since the current membership of event senders may not be accurately known. ([\#13354](https://github.com/matrix-org/synapse/issues/13354)) + + +Synapse 1.63.1 (2022-07-20) +=========================== + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.63.0 where push actions were incorrectly calculated for appservice users. This caused performance issues on servers with large numbers of appservices. ([\#13332](https://github.com/matrix-org/synapse/issues/13332)) + + Synapse 1.63.0 (2022-07-19) =========================== @@ -82,7 +204,6 @@ Internal Changes - More aggressively rotate push actions. ([\#13211](https://github.com/matrix-org/synapse/issues/13211)) - Add `max_line_length` setting for Python files to the `.editorconfig`. Contributed by @sumnerevans @ Beeper. ([\#13228](https://github.com/matrix-org/synapse/issues/13228)) - Synapse 1.62.0 (2022-07-05) =========================== @@ -90,7 +211,6 @@ No significant changes since 1.62.0rc3. Authors of spam-checker plugins should consult the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.62/docs/upgrade.md#upgrading-to-v1620) to learn about the enriched signatures for spam checker callbacks, which are supported with this release of Synapse. - Synapse 1.62.0rc3 (2022-07-04) ============================== @@ -130,7 +250,7 @@ Bugfixes - Update [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786) implementation to check `state_key`. ([\#12939](https://github.com/matrix-org/synapse/issues/12939)) - Fix a bug introduced in Synapse 1.58 where Synapse would not report full version information when installed from a git checkout. This is a best-effort affair and not guaranteed to be stable. ([\#12973](https://github.com/matrix-org/synapse/issues/12973)) - Fix a bug introduced in Synapse 1.60 where Synapse would fail to start if the `sqlite3` module was not available. ([\#12979](https://github.com/matrix-org/synapse/issues/12979)) -- Fix a bug where non-standard information was required when requesting the `/hierarchy` API over federation. Introduced +- Fix a bug where non-standard information was required when requesting the `/hierarchy` API over federation. Introduced in Synapse v1.41.0. ([\#12991](https://github.com/matrix-org/synapse/issues/12991)) - Fix a long-standing bug which meant that rate limiting was not restrictive enough in some cases. ([\#13018](https://github.com/matrix-org/synapse/issues/13018)) - Fix a bug introduced in Synapse 1.58 where profile requests for a malformed user ID would cause an internal error. Synapse now returns 400 Bad Request in this situation. 
([\#13041](https://github.com/matrix-org/synapse/issues/13041)) diff --git a/contrib/workers-bash-scripts/create-multiple-workers.md b/contrib/workers-bash-scripts/create-multiple-generic-workers.md similarity index 93% rename from contrib/workers-bash-scripts/create-multiple-workers.md rename to contrib/workers-bash-scripts/create-multiple-generic-workers.md index ad5142fe15e7..d30310142946 100644 --- a/contrib/workers-bash-scripts/create-multiple-workers.md +++ b/contrib/workers-bash-scripts/create-multiple-generic-workers.md @@ -1,4 +1,4 @@ -# Creating multiple workers with a bash script +# Creating multiple generic workers with a bash script Setting up multiple worker configuration files manually can be time-consuming. You can alternatively create multiple worker configuration files with a simple `bash` script. For example: diff --git a/contrib/workers-bash-scripts/create-multiple-stream-writers.md b/contrib/workers-bash-scripts/create-multiple-stream-writers.md new file mode 100644 index 000000000000..0d2ca780a6a3 --- /dev/null +++ b/contrib/workers-bash-scripts/create-multiple-stream-writers.md @@ -0,0 +1,145 @@ +# Creating multiple stream writers with a bash script + +This script creates multiple [stream writer](https://github.com/matrix-org/synapse/blob/develop/docs/workers.md#stream-writers) workers. + +Stream writers require both replication and HTTP listeners. + +It also prints out the example lines for Synapse main configuration file. + +Remember to route necessary endpoints directly to a worker associated with it. + +If you run the script as-is, it will create workers with the replication listener starting from port 8034 and another, regular http listener starting from 8044. If you don't need all of the stream writers listed in the script, just remove them from the ```STREAM_WRITERS``` array. + +```sh +#!/bin/bash + +# Start with these replication and http ports. +# The script loop starts with the exact port and then increments it by one. +REP_START_PORT=8034 +HTTP_START_PORT=8044 + +# Stream writer workers to generate. Feel free to add or remove them as you wish. +# Event persister ("events") isn't included here as it does not require its +# own HTTP listener. + +STREAM_WRITERS+=( "presence" "typing" "receipts" "to_device" "account_data" ) + +NUM_WRITERS=$(expr ${#STREAM_WRITERS[@]}) + +i=0 + +while [ $i -lt "$NUM_WRITERS" ] +do +cat << EOF > ${STREAM_WRITERS[$i]}_stream_writer.yaml +worker_app: synapse.app.generic_worker +worker_name: ${STREAM_WRITERS[$i]}_stream_writer + +# The replication listener on the main synapse process. +worker_replication_host: 127.0.0.1 +worker_replication_http_port: 9093 + +worker_listeners: + - type: http + port: $(expr $REP_START_PORT + $i) + resources: + - names: [replication] + + - type: http + port: $(expr $HTTP_START_PORT + $i) + resources: + - names: [client] + +worker_log_config: /etc/matrix-synapse/stream-writer-log.yaml +EOF +HOMESERVER_YAML_INSTANCE_MAP+=$" ${STREAM_WRITERS[$i]}_stream_writer: + host: 127.0.0.1 + port: $(expr $REP_START_PORT + $i) +" + +HOMESERVER_YAML_STREAM_WRITERS+=$" ${STREAM_WRITERS[$i]}: ${STREAM_WRITERS[$i]}_stream_writer +" + +((i++)) +done + +cat << EXAMPLECONFIG +# Add these lines to your homeserver.yaml. +# Don't forget to configure your reverse proxy and +# necessary endpoints to their respective worker. + +# See https://github.com/matrix-org/synapse/blob/develop/docs/workers.md +# for more information. 
+ +# Remember: Under NO circumstances should the replication +# listener be exposed to the public internet; +# it has no authentication and is unencrypted. + +instance_map: +$HOMESERVER_YAML_INSTANCE_MAP +stream_writers: +$HOMESERVER_YAML_STREAM_WRITERS +EXAMPLECONFIG +``` + +Copy the code above save it to a file ```create_stream_writers.sh``` (for example). + +Make the script executable by running ```chmod +x create_stream_writers.sh```. + +## Run the script to create workers and print out a sample configuration + +Simply run the script to create YAML files in the current folder and print out the required configuration for ```homeserver.yaml```. + +```console +$ ./create_stream_writers.sh + +# Add these lines to your homeserver.yaml. +# Don't forget to configure your reverse proxy and +# necessary endpoints to their respective worker. + +# See https://github.com/matrix-org/synapse/blob/develop/docs/workers.md +# for more information + +# Remember: Under NO circumstances should the replication +# listener be exposed to the public internet; +# it has no authentication and is unencrypted. + +instance_map: + presence_stream_writer: + host: 127.0.0.1 + port: 8034 + typing_stream_writer: + host: 127.0.0.1 + port: 8035 + receipts_stream_writer: + host: 127.0.0.1 + port: 8036 + to_device_stream_writer: + host: 127.0.0.1 + port: 8037 + account_data_stream_writer: + host: 127.0.0.1 + port: 8038 + +stream_writers: + presence: presence_stream_writer + typing: typing_stream_writer + receipts: receipts_stream_writer + to_device: to_device_stream_writer + account_data: account_data_stream_writer +``` + +Simply copy-and-paste the output to an appropriate place in your Synapse main configuration file. + +## Write directly to Synapse configuration file + +You could also write the output directly to homeserver main configuration file. **This, however, is not recommended** as even a small typo (such as replacing >> with >) can erase the entire ```homeserver.yaml```. + +If you do this, back up your original configuration file first: + +```console +# Back up homeserver.yaml first +cp /etc/matrix-synapse/homeserver.yaml /etc/matrix-synapse/homeserver.yaml.bak + +# Create workers and write output to your homeserver.yaml +./create_stream_writers.sh >> /etc/matrix-synapse/homeserver.yaml +``` diff --git a/debian/changelog b/debian/changelog index b1635bc39299..9efcb4f13293 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,27 @@ +matrix-synapse-py3 (1.64.0) stable; urgency=medium + + * New Synapse release 1.64.0. + + -- Synapse Packaging team Tue, 02 Aug 2022 10:32:30 +0100 + +matrix-synapse-py3 (1.64.0~rc2) stable; urgency=medium + + * New Synapse release 1.64.0rc2. + + -- Synapse Packaging team Fri, 29 Jul 2022 12:22:53 +0100 + +matrix-synapse-py3 (1.64.0~rc1) stable; urgency=medium + + * New Synapse release 1.64.0rc1. + + -- Synapse Packaging team Tue, 26 Jul 2022 12:11:49 +0100 + +matrix-synapse-py3 (1.63.1) stable; urgency=medium + + * New Synapse release 1.63.1. + + -- Synapse Packaging team Wed, 20 Jul 2022 13:36:52 +0100 + matrix-synapse-py3 (1.63.0) stable; urgency=medium * Clarify that homeserver server names are included in the data reported diff --git a/docker/Dockerfile b/docker/Dockerfile index 22707ed1425a..f4d8e6c92575 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -45,7 +45,7 @@ RUN \ # We install poetry in its own build stage to avoid its dependencies conflicting with # synapse's dependencies. 
-# We use a specific commit from poetry's master branch instead of our usual 1.1.12, +# We use a specific commit from poetry's master branch instead of our usual 1.1.14, # to incorporate fixes to some bugs in `poetry export`. This commit corresponds to # https://github.com/python-poetry/poetry/pull/5156 and # https://github.com/python-poetry/poetry/issues/5141 ; diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers index 0f1570cfb654..84f836ff7bfc 100644 --- a/docker/Dockerfile-workers +++ b/docker/Dockerfile-workers @@ -1,3 +1,4 @@ +# syntax=docker/dockerfile:1 # Inherit from the official Synapse docker image ARG SYNAPSE_VERSION=latest FROM matrixdotorg/synapse:$SYNAPSE_VERSION diff --git a/docker/README-testing.md b/docker/README-testing.md index 1f0423f09b91..21b99963d8b7 100644 --- a/docker/README-testing.md +++ b/docker/README-testing.md @@ -22,6 +22,10 @@ Consult the [contributing guide][guideComplementSh] for instructions on how to u Under some circumstances, you may wish to build the images manually. The instructions below will lead you to doing that. +Note that these images can only be built using [BuildKit](https://docs.docker.com/develop/develop-images/build_enhancements/), +therefore BuildKit needs to be enabled when calling `docker build`. This can be done by +setting `DOCKER_BUILDKIT=1` in your environment. + Start by building the base Synapse docker image. If you wish to run tests with the latest release of Synapse, instead of your current checkout, you can skip this step. From the root of the repository: diff --git a/docker/complement/Dockerfile b/docker/complement/Dockerfile index 8bec0f611649..3cfff19f9acd 100644 --- a/docker/complement/Dockerfile +++ b/docker/complement/Dockerfile @@ -1,45 +1,62 @@ +# syntax=docker/dockerfile:1 # This dockerfile builds on top of 'docker/Dockerfile-workers' in matrix-org/synapse # by including a built-in postgres instance, as well as setting up the homeserver so # that it is ready for testing via Complement. # # Instructions for building this image from those it depends on is detailed in this guide: # https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse -ARG SYNAPSE_VERSION=latest -FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION - -# Install postgresql -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -yqq postgresql-13 - -# Configure a user and create a database for Synapse -RUN pg_ctlcluster 13 main start && su postgres -c "echo \ - \"ALTER USER postgres PASSWORD 'somesecret'; \ - CREATE DATABASE synapse \ - ENCODING 'UTF8' \ - LC_COLLATE='C' \ - LC_CTYPE='C' \ - template=template0;\" | psql" && pg_ctlcluster 13 main stop -# Extend the shared homeserver config to disable rate-limiting, -# set Complement's static shared secret, enable registration, amongst other -# tweaks to get Synapse ready for testing. -# To do this, we copy the old template out of the way and then include it -# with Jinja2. -RUN mv /conf/shared.yaml.j2 /conf/shared-orig.yaml.j2 -COPY conf/workers-shared-extra.yaml.j2 /conf/shared.yaml.j2 - -WORKDIR /data +ARG SYNAPSE_VERSION=latest -COPY conf/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf +# first of all, we create a base image with a postgres server and database, +# which we can copy into the target image. For repeated rebuilds, this is +# much faster than apt installing postgres each time. 
+# +# This trick only works because (a) the Synapse image happens to have all the +# shared libraries that postgres wants, (b) we use a postgres image based on +# the same debian version as Synapse's docker image (so the versions of the +# shared libraries match). -# Copy the entrypoint -COPY conf/start_for_complement.sh / +FROM postgres:13-bullseye AS postgres_base + # initialise the database cluster in /var/lib/postgresql + RUN gosu postgres initdb --locale=C --encoding=UTF-8 --auth-host password -# Expose nginx's listener ports -EXPOSE 8008 8448 + # Configure a password and create a database for Synapse + RUN echo "ALTER USER postgres PASSWORD 'somesecret'" | gosu postgres postgres --single + RUN echo "CREATE DATABASE synapse" | gosu postgres postgres --single -ENTRYPOINT ["/start_for_complement.sh"] +# now build the final image, based on the Synapse image. -# Update the healthcheck to have a shorter check interval -HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \ - CMD /bin/sh /healthcheck.sh +FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION + # copy the postgres installation over from the image we built above + RUN adduser --system --uid 999 postgres --home /var/lib/postgresql + COPY --from=postgres_base /var/lib/postgresql /var/lib/postgresql + COPY --from=postgres_base /usr/lib/postgresql /usr/lib/postgresql + COPY --from=postgres_base /usr/share/postgresql /usr/share/postgresql + RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql + ENV PATH="${PATH}:/usr/lib/postgresql/13/bin" + ENV PGDATA=/var/lib/postgresql/data + + # Extend the shared homeserver config to disable rate-limiting, + # set Complement's static shared secret, enable registration, amongst other + # tweaks to get Synapse ready for testing. + # To do this, we copy the old template out of the way and then include it + # with Jinja2. 
+ RUN mv /conf/shared.yaml.j2 /conf/shared-orig.yaml.j2 + COPY conf/workers-shared-extra.yaml.j2 /conf/shared.yaml.j2 + + WORKDIR /data + + COPY conf/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf + + # Copy the entrypoint + COPY conf/start_for_complement.sh / + + # Expose nginx's listener ports + EXPOSE 8008 8448 + + ENTRYPOINT ["/start_for_complement.sh"] + + # Update the healthcheck to have a shorter check interval + HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \ + CMD /bin/sh /healthcheck.sh diff --git a/docker/complement/conf/postgres.supervisord.conf b/docker/complement/conf/postgres.supervisord.conf index 5dae3e63309a..b88bfc772e40 100644 --- a/docker/complement/conf/postgres.supervisord.conf +++ b/docker/complement/conf/postgres.supervisord.conf @@ -1,5 +1,5 @@ [program:postgres] -command=/usr/local/bin/prefix-log /usr/bin/pg_ctlcluster 13 main start --foreground +command=/usr/local/bin/prefix-log gosu postgres postgres # Only start if START_POSTGRES=1 autostart=%(ENV_START_POSTGRES)s diff --git a/docker/complement/conf/workers-shared-extra.yaml.j2 b/docker/complement/conf/workers-shared-extra.yaml.j2 index b5f675bc731e..9e554a865ee5 100644 --- a/docker/complement/conf/workers-shared-extra.yaml.j2 +++ b/docker/complement/conf/workers-shared-extra.yaml.j2 @@ -67,6 +67,10 @@ rc_joins: per_second: 9999 burst_count: 9999 +rc_joins_per_room: + per_second: 9999 + burst_count: 9999 + rc_3pid_validation: per_second: 1000 burst_count: 1000 diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 2e8c4e213755..2d56b084e269 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -35,7 +35,6 @@ - [Application Services](application_services.md) - [Server Notices](server_notices.md) - [Consent Tracking](consent_tracking.md) - - [URL Previews](development/url_previews.md) - [User Directory](user_directory.md) - [Message Retention Policies](message_retention_policies.md) - [Pluggable Modules](modules/index.md) diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md index d4873f94905f..9aa489e4a32a 100644 --- a/docs/admin_api/rooms.md +++ b/docs/admin_api/rooms.md @@ -59,6 +59,7 @@ The following fields are possible in the JSON response body: - `guest_access` - Whether guests can join the room. One of: ["can_join", "forbidden"]. - `history_visibility` - Who can see the room history. One of: ["invited", "joined", "shared", "world_readable"]. - `state_events` - Total number of state_events of a room. Complexity of the room. + - `room_type` - The type of the room taken from the room's creation event; for example "m.space" if the room is a space. If the room does not define a type, the value will be `null`. * `offset` - The current pagination offset in rooms. This parameter should be used instead of `next_token` for room offset as `next_token` is not intended to be parsed. @@ -101,7 +102,8 @@ A response body like the following is returned: "join_rules": "invite", "guest_access": null, "history_visibility": "shared", - "state_events": 93534 + "state_events": 93534, + "room_type": "m.space" }, ... (8 hidden items) ... 
{ @@ -118,7 +120,8 @@ A response body like the following is returned: "join_rules": "invite", "guest_access": null, "history_visibility": "shared", - "state_events": 8345 + "state_events": 8345, + "room_type": null } ], "offset": 0, @@ -151,7 +154,8 @@ A response body like the following is returned: "join_rules": "invite", "guest_access": null, "history_visibility": "shared", - "state_events": 8 + "state_events": 8, + "room_type": null } ], "offset": 0, @@ -184,7 +188,8 @@ A response body like the following is returned: "join_rules": "invite", "guest_access": null, "history_visibility": "shared", - "state_events": 93534 + "state_events": 93534, + "room_type": null }, ... (98 hidden items) ... { @@ -201,7 +206,8 @@ A response body like the following is returned: "join_rules": "invite", "guest_access": null, "history_visibility": "shared", - "state_events": 8345 + "state_events": 8345, + "room_type": "m.space" } ], "offset": 0, @@ -238,7 +244,9 @@ A response body like the following is returned: "join_rules": "invite", "guest_access": null, "history_visibility": "shared", - "state_events": 93534 + "state_events": 93534, + "room_type": "m.space" + }, ... (48 hidden items) ... { @@ -255,7 +263,9 @@ A response body like the following is returned: "join_rules": "invite", "guest_access": null, "history_visibility": "shared", - "state_events": 8345 + "state_events": 8345, + "room_type": null + } ], "offset": 100, @@ -290,6 +300,8 @@ The following fields are possible in the JSON response body: * `guest_access` - Whether guests can join the room. One of: ["can_join", "forbidden"]. * `history_visibility` - Who can see the room history. One of: ["invited", "joined", "shared", "world_readable"]. * `state_events` - Total number of state_events of a room. Complexity of the room. +* `room_type` - The type of the room taken from the room's creation event; for example "m.space" if the room is a space. + If the room does not define a type, the value will be `null`. The API is: @@ -317,7 +329,8 @@ A response body like the following is returned: "join_rules": "invite", "guest_access": null, "history_visibility": "shared", - "state_events": 93534 + "state_events": 93534, + "room_type": "m.space" } ``` diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index 1235f1cb950e..0871cfebf566 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -544,7 +544,7 @@ Gets a list of all local media that a specific `user_id` has created. These are media that the user has uploaded themselves ([local media](../media_repository.md#local-media)), as well as [URL preview images](../media_repository.md#url-previews) requested by the user if the -[feature is enabled](../development/url_previews.md). +[feature is enabled](../usage/configuration/config_documentation.md#url_preview_enabled). By default, the response is ordered by descending creation date and ascending media ID. The newest media is on top. You can change the order with parameters diff --git a/docs/development/dependencies.md b/docs/development/dependencies.md index 8ef7d357d8cd..236856a6b047 100644 --- a/docs/development/dependencies.md +++ b/docs/development/dependencies.md @@ -237,3 +237,28 @@ poetry run pip install build && poetry run python -m build because [`build`](https://github.com/pypa/build) is a standardish tool which doesn't require poetry. (It's what we use in CI too). However, you could try `poetry build` too. + + +# Troubleshooting + +## Check the version of poetry with `poetry --version`. 
+ +At the time of writing, the 1.2 series is beta only. We have seen some examples +where the lockfiles generated by 1.2 prereleasese aren't interpreted correctly +by poetry 1.1.x. For now, use poetry 1.1.14, which includes a critical +[change](https://github.com/python-poetry/poetry/pull/5973) needed to remain +[compatible with PyPI](https://github.com/pypi/warehouse/pull/11775). + +It can also be useful to check the version of `poetry-core` in use. If you've +installed `poetry` with `pipx`, try `pipx runpip poetry list | grep poetry-core`. + +## Clear caches: `poetry cache clear --all pypi`. + +Poetry caches a bunch of information about packages that isn't readily available +from PyPI. (This is what makes poetry seem slow when doing the first +`poetry install`.) Try `poetry cache list` and `poetry cache clear --all +` to see if that fixes things. + +## Try `--verbose` or `--dry-run` arguments. + +Sometimes useful to see what poetry's internal logic is. diff --git a/docs/development/url_previews.md b/docs/development/url_previews.md deleted file mode 100644 index 154b9a5e12f4..000000000000 --- a/docs/development/url_previews.md +++ /dev/null @@ -1,61 +0,0 @@ -URL Previews -============ - -The `GET /_matrix/media/r0/preview_url` endpoint provides a generic preview API -for URLs which outputs [Open Graph](https://ogp.me/) responses (with some Matrix -specific additions). - -This does have trade-offs compared to other designs: - -* Pros: - * Simple and flexible; can be used by any clients at any point -* Cons: - * If each homeserver provides one of these independently, all the HSes in a - room may needlessly DoS the target URI - * The URL metadata must be stored somewhere, rather than just using Matrix - itself to store the media. - * Matrix cannot be used to distribute the metadata between homeservers. - -When Synapse is asked to preview a URL it does the following: - -1. Checks against a URL blacklist (defined as `url_preview_url_blacklist` in the - config). -2. Checks the in-memory cache by URLs and returns the result if it exists. (This - is also used to de-duplicate processing of multiple in-flight requests at once.) -3. Kicks off a background process to generate a preview: - 1. Checks the database cache by URL and timestamp and returns the result if it - has not expired and was successful (a 2xx return code). - 2. Checks if the URL matches an [oEmbed](https://oembed.com/) pattern. If it - does, update the URL to download. - 3. Downloads the URL and stores it into a file via the media storage provider - and saves the local media metadata. - 4. If the media is an image: - 1. Generates thumbnails. - 2. Generates an Open Graph response based on image properties. - 5. If the media is HTML: - 1. Decodes the HTML via the stored file. - 2. Generates an Open Graph response from the HTML. - 3. If a JSON oEmbed URL was found in the HTML via autodiscovery: - 1. Downloads the URL and stores it into a file via the media storage provider - and saves the local media metadata. - 2. Convert the oEmbed response to an Open Graph response. - 3. Override any Open Graph data from the HTML with data from oEmbed. - 4. If an image exists in the Open Graph response: - 1. Downloads the URL and stores it into a file via the media storage - provider and saves the local media metadata. - 2. Generates thumbnails. - 3. Updates the Open Graph response based on image properties. - 6. If the media is JSON and an oEmbed URL was found: - 1. Convert the oEmbed response to an Open Graph response. - 2. 
If a thumbnail or image is in the oEmbed response: - 1. Downloads the URL and stores it into a file via the media storage - provider and saves the local media metadata. - 2. Generates thumbnails. - 3. Updates the Open Graph response based on image properties. - 7. Stores the result in the database cache. -4. Returns the result. - -The in-memory cache expires after 1 hour. - -Expired entries in the database cache (and their associated media files) are -deleted every 10 seconds. The default expiration time is 1 hour from download. diff --git a/docs/media_repository.md b/docs/media_repository.md index ba17f8a856f1..23e6da7f31f9 100644 --- a/docs/media_repository.md +++ b/docs/media_repository.md @@ -7,8 +7,7 @@ The media repository users. * caches avatars, attachments and their thumbnails for media uploaded by remote users. - * caches resources and thumbnails used for - [URL previews](development/url_previews.md). + * caches resources and thumbnails used for URL previews. All media in Matrix can be identified by a unique [MXC URI](https://spec.matrix.org/latest/client-server-api/#matrix-content-mxc-uris), @@ -59,8 +58,6 @@ remote_thumbnail/matrix.org/aa/bb/cccccccccccccccccccc/128-96-image-jpeg Note that `remote_thumbnail/` does not have an `s`. ## URL Previews -See [URL Previews](development/url_previews.md) for documentation on the URL preview -process. When generating previews for URLs, Synapse may download and cache various resources, including images. These resources are assigned temporary media IDs diff --git a/docs/reverse_proxy.md b/docs/reverse_proxy.md index 69caa8a73ee8..d1618e815526 100644 --- a/docs/reverse_proxy.md +++ b/docs/reverse_proxy.md @@ -79,63 +79,32 @@ server { } ``` -### Caddy v1 - -``` -matrix.example.com { - proxy /_matrix http://localhost:8008 { - transparent - } - - proxy /_synapse/client http://localhost:8008 { - transparent - } -} - -example.com:8448 { - proxy / http://localhost:8008 { - transparent - } -} -``` - ### Caddy v2 ``` matrix.example.com { - reverse_proxy /_matrix/* http://localhost:8008 - reverse_proxy /_synapse/client/* http://localhost:8008 + reverse_proxy /_matrix/* localhost:8008 + reverse_proxy /_synapse/client/* localhost:8008 } example.com:8448 { - reverse_proxy http://localhost:8008 + reverse_proxy localhost:8008 } ``` + [Delegation](delegate.md) example: -``` -(matrix-well-known-header) { - # Headers - header Access-Control-Allow-Origin "*" - header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS" - header Access-Control-Allow-Headers "Origin, X-Requested-With, Content-Type, Accept, Authorization" - header Content-Type "application/json" -} +``` example.com { - handle /.well-known/matrix/server { - import matrix-well-known-header - respond `{"m.server":"matrix.example.com:443"}` - } - - handle /.well-known/matrix/client { - import matrix-well-known-header - respond `{"m.homeserver":{"base_url":"https://matrix.example.com"},"m.identity_server":{"base_url":"https://identity.example.com"}}` - } + header /.well-known/matrix/* Content-Type application/json + header /.well-known/matrix/* Access-Control-Allow-Origin * + respond /.well-known/matrix/server `{"m.server": "matrix.example.com:443"}` + respond /.well-known/matrix/client `{"m.homeserver":{"base_url":"https://matrix.example.com"},"m.identity_server":{"base_url":"https://identity.example.com"}}` } matrix.example.com { - reverse_proxy /_matrix/* http://localhost:8008 - reverse_proxy /_synapse/client/* http://localhost:8008 + reverse_proxy /_matrix/* localhost:8008 + 
reverse_proxy /_synapse/client/* localhost:8008 } ``` diff --git a/docs/upgrade.md b/docs/upgrade.md index 312f0b87fedc..47a74b67de29 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -89,6 +89,37 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.64.0 + +## Deprecation of the ability to delegate e-mail verification to identity servers + +Synapse v1.66.0 will remove the ability to delegate the tasks of verifying email address ownership, and password reset confirmation, to an identity server. + +If you require your homeserver to verify e-mail addresses or to support password resets via e-mail, please configure your homeserver with SMTP access so that it can send e-mails on its own behalf. +[Consult the configuration documentation for more information.](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#email) + +The option that will be removed is `account_threepid_delegates.email`. + + +## Changes to the event replication streams + +Synapse now includes a flag indicating if an event is an outlier when +replicating it to other workers. This is a forwards- and backwards-incompatible +change: v1.63 workers cannot process events replicated by v1.64 workers, and +vice versa. + +Once all workers are upgraded to v1.64 (or downgraded to v1.63), event +replication will resume as normal. + +## frozendict release + +[frozendict 2.3.3](https://github.com/Marco-Sulla/python-frozendict/releases/tag/v2.3.3) +has recently been released, which fixes a memory leak that occurs during `/sync` +requests. We advise server administrators who installed Synapse via pip to upgrade +frozendict with `pip install --upgrade frozendict`. The Docker image +`matrixdotorg/synapse` and the Debian packages from `packages.matrix.org` already +include the updated library. + # Upgrading to v1.62.0 ## New signatures for spam checker callbacks diff --git a/docs/usage/administration/admin_api/README.md b/docs/usage/administration/admin_api/README.md index 3cbedc5dfa30..c60b6da0de6f 100644 --- a/docs/usage/administration/admin_api/README.md +++ b/docs/usage/administration/admin_api/README.md @@ -18,6 +18,11 @@ already on your `$PATH` depending on how Synapse was installed. Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings. ## Making an Admin API request +For security reasons, we [recommend](reverse_proxy.md#synapse-administration-endpoints) +that the Admin API (`/_synapse/admin/...`) should be hidden from public view using a +reverse proxy. This means you should typically query the Admin API from a terminal on +the machine which runs Synapse. + Once you have your `access_token`, you will need to authenticate each request to an Admin API endpoint by providing the token as either a query parameter or a request header. To add it as a request header in cURL: @@ -25,5 +30,17 @@ providing the token as either a query parameter or a request header. To add it a curl --header "Authorization: Bearer " ``` +For example, suppose we want to +[query the account](user_admin_api.md#query-user-account) of the user +`@foo:bar.com`. We need an admin access token (e.g. +`syt_AjfVef2_L33JNpafeif_0feKJfeaf0CQpoZk`), and we need to know which port +Synapse's [`client` listener](config_documentation.md#listeners) is listening +on (e.g. `8008`). Then we can use the following command to request the account +information from the Admin API. 
+ +```sh +curl --header "Authorization: Bearer syt_AjfVef2_L33JNpafeif_0feKJfeaf0CQpoZk" -X GET http://127.0.0.1:8008/_synapse/admin/v2/users/@foo:bar.com +``` + For more details on access tokens in Matrix, please refer to the complete [matrix spec documentation](https://matrix.org/docs/spec/client_server/r0.6.1#using-access-tokens). diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 6d7d700a134b..d8616f7dbda1 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1,11 +1,11 @@ # Configuring Synapse -This is intended as a guide to the Synapse configuration. The behavior of a Synapse instance can be modified -through the many configuration settings documented here — each config option is explained, +This is intended as a guide to the Synapse configuration. The behavior of a Synapse instance can be modified +through the many configuration settings documented here — each config option is explained, including what the default is, how to change the default and what sort of behaviour the setting governs. -Also included is an example configuration for each setting. If you don't want to spend a lot of time +Also included is an example configuration for each setting. If you don't want to spend a lot of time thinking about options, the config as generated sets sensible defaults for all values. Do note however that the -database defaults to SQLite, which is not recommended for production usage. You can read more on this subject +database defaults to SQLite, which is not recommended for production usage. You can read more on this subject [here](../../setup/installation.md#using-postgresql). ## Config Conventions @@ -26,17 +26,17 @@ messages from the database after 5 minutes, rather than 5 months. In addition, configuration options referring to size use the following suffixes: * `M` = MiB, or 1,048,576 bytes -* `K` = KiB, or 1024 bytes +* `K` = KiB, or 1024 bytes For example, setting `max_avatar_size: 10M` means that Synapse will not accept files larger than 10,485,760 bytes -for a user avatar. +for a user avatar. -### YAML +### YAML The configuration file is a [YAML](https://yaml.org/) file, which means that certain syntax rules apply if you want your config file to be read properly. A few helpful things to know: -* `#` before any option in the config will comment out that setting and either a default (if available) will +* `#` before any option in the config will comment out that setting and either a default (if available) will be applied or Synapse will ignore the setting. Thus, in example #1 below, the setting will be read and - applied, but in example #2 the setting will not be read and a default will be applied. + applied, but in example #2 the setting will not be read and a default will be applied. Example #1: ```yaml @@ -50,13 +50,13 @@ apply if you want your config file to be read properly. A few helpful things to will determine whether a given setting is read as part of another setting, or considered on its own. Thus, in example #1, the `enabled` setting is read as a sub-option of the `presence` setting, and will be properly applied. - + However, the lack of indentation before the `enabled` setting in example #2 means that when reading the config, Synapse will consider both `presence` and `enabled` as different settings. In this case, `presence` has no value, and thus a default applied, and `enabled` is an option that Synapse doesn't recognize and thus ignores. 
- - Example #1: + + Example #1: ```yaml presence: enabled: false @@ -66,11 +66,11 @@ apply if you want your config file to be read properly. A few helpful things to presence: enabled: false ``` - In this manual, all top-level settings (ones with no indentation) are identified - at the beginning of their section (i.e. "### `example_setting`") and - the sub-options, if any, are identified and listed in the body of the section. + In this manual, all top-level settings (ones with no indentation) are identified + at the beginning of their section (i.e. "### `example_setting`") and + the sub-options, if any, are identified and listed in the body of the section. In addition, each setting has an example of its usage, with the proper indentation - shown. + shown. ## Contents [Modules](#modules) @@ -126,7 +126,7 @@ documentation on how to configure or create custom modules for Synapse. --- ### `modules` -Use the `module` sub-option to add modules under this option to extend functionality. +Use the `module` sub-option to add modules under this option to extend functionality. The `module` setting then has a sub-option, `config`, which can be used to define some configuration for the `module`. @@ -166,11 +166,11 @@ The `server_name` cannot be changed later so it is important to configure this correctly before you start Synapse. It should be all lowercase and may contain an explicit port. -There is no default for this option. - +There is no default for this option. + Example configuration #1: ```yaml -server_name: matrix.org +server_name: matrix.org ``` Example configuration #2: ```yaml @@ -188,7 +188,7 @@ pid_file: DATADIR/homeserver.pid --- ### `web_client_location` -The absolute URL to the web client which `/` will redirect to. Defaults to none. +The absolute URL to the web client which `/` will redirect to. Defaults to none. Example configuration: ```yaml @@ -217,7 +217,7 @@ By default, other servers will try to reach our server on port 8448, which can be inconvenient in some environments. Provided `https:///` on port 443 is routed to Synapse, this -option configures Synapse to serve a file at `https:///.well-known/matrix/server`. +option configures Synapse to serve a file at `https:///.well-known/matrix/server`. This will tell other servers to send traffic to port 443 instead. This option currently defaults to false. @@ -235,22 +235,24 @@ serve_server_wellknown: true This option allows server runners to add arbitrary key-value pairs to the [client-facing `.well-known` response](https://spec.matrix.org/latest/client-server-api/#well-known-uri). Note that the `public_baseurl` config option must be provided for Synapse to serve a response to `/.well-known/matrix/client` at all. -If this option is provided, it parses the given yaml to json and +If this option is provided, it parses the given yaml to json and serves it on `/.well-known/matrix/client` endpoint alongside the standard properties. +*Added in Synapse 1.62.0.* + Example configuration: ```yaml -extra_well_known_client_content : +extra_well_known_client_content : option1: value1 option2: value2 ``` --- ### `soft_file_limit` - + Set the soft limit on the number of file descriptors synapse can use. Zero is used to indicate synapse should set the soft limit to the hard limit. -Defaults to 0. +Defaults to 0. Example configuration: ```yaml @@ -260,8 +262,8 @@ soft_file_limit: 3 ### `presence` Presence tracking allows users to see the state (e.g online/offline) -of other local and remote users. 
Set the `enabled` sub-option to false to -disable presence tracking on this homeserver. Defaults to true. +of other local and remote users. Set the `enabled` sub-option to false to +disable presence tracking on this homeserver. Defaults to true. This option replaces the previous top-level 'use_presence' option. Example configuration: @@ -272,8 +274,8 @@ presence: --- ### `require_auth_for_profile_requests` -Whether to require authentication to retrieve profile data (avatars, display names) of other -users through the client API. Defaults to false. Note that profile data is also available +Whether to require authentication to retrieve profile data (avatars, display names) of other +users through the client API. Defaults to false. Note that profile data is also available via the federation API, unless `allow_profile_lookup_over_federation` is set to false. Example configuration: @@ -284,11 +286,11 @@ require_auth_for_profile_requests: true ### `limit_profile_requests_to_users_who_share_rooms` Use this option to require a user to share a room with another user in order -to retrieve their profile information. Only checked on Client-Server +to retrieve their profile information. Only checked on Client-Server requests. Profile requests from other servers should be checked by the requesting server. Defaults to false. -Example configuration: +Example configuration: ```yaml limit_profile_requests_to_users_who_share_rooms: true ``` @@ -334,7 +336,7 @@ The default room version for newly created rooms on this server. Known room versions are listed [here](https://spec.matrix.org/latest/rooms/#complete-list-of-room-versions) For example, for room version 1, `default_room_version` should be set -to "1". +to "1". Currently defaults to "9". @@ -346,7 +348,7 @@ default_room_version: "8" ### `gc_thresholds` The garbage collection threshold parameters to pass to `gc.set_threshold`, if defined. -Defaults to none. +Defaults to none. Example configuration: ```yaml @@ -356,7 +358,7 @@ gc_thresholds: [700, 10, 10] ### `gc_min_interval` The minimum time in seconds between each GC for a generation, regardless of -the GC thresholds. This ensures that we don't do GC too frequently. A value of `[1s, 10s, 30s]` +the GC thresholds. This ensures that we don't do GC too frequently. A value of `[1s, 10s, 30s]` indicates that a second must pass between consecutive generation 0 GCs, etc. Defaults to `[1s, 10s, 30s]`. @@ -398,7 +400,7 @@ enable_search: false ``` --- ### `ip_range_blacklist` - + This option prevents outgoing requests from being sent to the specified blacklisted IP address CIDR ranges. If this option is not specified then it defaults to private IP address ranges (see the example below). @@ -461,13 +463,13 @@ configuration. Sub-options for each listener include: -* `port`: the TCP port to bind to. +* `port`: the TCP port to bind to. * `bind_addresses`: a list of local addresses to listen on. The default is 'all local interfaces'. * `type`: the type of listener. Normally `http`, but other valid options are: - + * `manhole`: (see the docs [here](../../manhole.md)), * `metrics`: (see the docs [here](../../metrics-howto.md)), @@ -583,7 +585,7 @@ forward extremities reaches a given threshold, Synapse will send an `org.matrix.dummy_event` event, which will reduce the forward extremities in the room. -This setting defines the threshold (i.e. number of forward extremities in the room) at which dummy events are sent. +This setting defines the threshold (i.e. 
number of forward extremities in the room) at which dummy events are sent. The default value is 10. Example configuration: @@ -610,7 +612,7 @@ Useful options for Synapse admins. ### `admin_contact` -How to reach the server admin, used in `ResourceLimitError`. Defaults to none. +How to reach the server admin, used in `ResourceLimitError`. Defaults to none. Example configuration: ```yaml @@ -620,7 +622,7 @@ admin_contact: 'mailto:admin@server.com' ### `hs_disabled` and `hs_disabled_message` Blocks users from connecting to the homeserver and provides a human-readable reason -why the connection was blocked. Defaults to false. +why the connection was blocked. Defaults to false. Example configuration: ```yaml @@ -630,20 +632,20 @@ hs_disabled_message: 'Reason for why the HS is blocked' --- ### `limit_usage_by_mau` -This option disables/enables monthly active user blocking. Used in cases where the admin or -server owner wants to limit to the number of monthly active users. When enabled and a limit is +This option disables/enables monthly active user blocking. Used in cases where the admin or +server owner wants to limit to the number of monthly active users. When enabled and a limit is reached the server returns a `ResourceLimitError` with error type `Codes.RESOURCE_LIMIT_EXCEEDED`. Defaults to false. If this is enabled, a value for `max_mau_value` must also be set. Example configuration: ```yaml -limit_usage_by_mau: true +limit_usage_by_mau: true ``` --- ### `max_mau_value` -This option sets the hard limit of monthly active users above which the server will start -blocking user actions if `limit_usage_by_mau` is enabled. Defaults to 0. +This option sets the hard limit of monthly active users above which the server will start +blocking user actions if `limit_usage_by_mau` is enabled. Defaults to 0. Example configuration: ```yaml @@ -656,7 +658,7 @@ The option `mau_trial_days` is a means to add a grace period for active users. I means that users must be active for the specified number of days before they can be considered active and guards against the case where lots of users sign up in a short space of time never to return after their initial -session. Defaults to 0. +session. Defaults to 0. Example configuration: ```yaml @@ -672,7 +674,7 @@ use the value of `mau_trial_days` instead. Example configuration: ```yaml -mau_appservice_trial_days: +mau_appservice_trial_days: my_appservice_id: 3 another_appservice_id: 6 ``` @@ -694,7 +696,7 @@ mau_limit_alerting: false If enabled, the metrics for the number of monthly active users will be populated, however no one will be limited based on these numbers. If `limit_usage_by_mau` -is true, this is implied to be true. Defaults to false. +is true, this is implied to be true. Defaults to false. Example configuration: ```yaml @@ -718,7 +720,7 @@ mau_limit_reserved_threepids: ### `server_context` This option is used by phonehome stats to group together related servers. -Defaults to none. +Defaults to none. Example configuration: ```yaml @@ -734,11 +736,11 @@ resource-constrained. Options for this setting include: * `enabled`: whether this check is enabled. Defaults to false. * `complexity`: the limit above which rooms cannot be joined. The default is 1.0. * `complexity_error`: override the error which is returned when the room is too complex with a - custom message. + custom message. * `admins_can_join`: allow server admins to join complex rooms. Default is false. Room complexity is an arbitrary measure based on factors such as the number of -users in the room. 
+users in the room. Example configuration: ```yaml @@ -773,7 +775,7 @@ allow_per_room_profiles: false ### `max_avatar_size` The largest permissible file size in bytes for a user avatar. Defaults to no restriction. -Use M for MB and K for KB. +Use M for MB and K for KB. Note that user avatar changes will not work if this is set without using Synapse's media repository. @@ -806,7 +808,7 @@ Example configuration: redaction_retention_period: 28d ``` --- -### `user_ips_max_age` +### `user_ips_max_age` How long to track users' last seen time and IPs in the database. @@ -821,7 +823,7 @@ user_ips_max_age: 14d Inhibits the `/requestToken` endpoints from returning an error that might leak information about whether an e-mail address is in use or not on this -homeserver. Defaults to false. +homeserver. Defaults to false. Note that for some endpoints the error situation is the e-mail already being used, and for others the error is entering the e-mail being unused. If this option is enabled, instead of returning an error, these endpoints will @@ -857,9 +859,9 @@ next_link_domain_whitelist: ["matrix.org"] ### `templates` and `custom_template_directory` These options define templates to use when generating email or HTML page contents. -The `custom_template_directory` determines which directory Synapse will try to +The `custom_template_directory` determines which directory Synapse will try to find template files in to use to generate email or HTML page contents. -If not set, or a file is not found within the template directory, a default +If not set, or a file is not found within the template directory, a default template from within the Synapse package will be used. See [here](../../templates.md) for more @@ -882,26 +884,26 @@ the `allowed_lifetime_min` and `allowed_lifetime_max` config options. If this feature is enabled, Synapse will regularly look for and purge events which are older than the room's maximum retention period. Synapse will also -filter events received over federation so that events that should have been -purged are ignored and not stored again. +filter events received over federation so that events that should have been +purged are ignored and not stored again. The message retention policies feature is disabled by default. This setting has the following sub-options: * `default_policy`: Default retention policy. If set, Synapse will apply it to rooms that lack the - 'm.room.retention' state event. This option is further specified by the - `min_lifetime` and `max_lifetime` sub-options associated with it. Note that the - value of `min_lifetime` doesn't matter much because Synapse doesn't take it into account yet. + 'm.room.retention' state event. This option is further specified by the + `min_lifetime` and `max_lifetime` sub-options associated with it. Note that the + value of `min_lifetime` doesn't matter much because Synapse doesn't take it into account yet. -* `allowed_lifetime_min` and `allowed_lifetime_max`: Retention policy limits. If - set, and the state of a room contains a `m.room.retention` event in its state +* `allowed_lifetime_min` and `allowed_lifetime_max`: Retention policy limits. If + set, and the state of a room contains a `m.room.retention` event in its state which contains a `min_lifetime` or a `max_lifetime` that's out of these bounds, Synapse will cap the room's policy to these limits when running purge jobs. 
* `purge_jobs` and the associated `shortest_max_lifetime` and `longest_max_lifetime` sub-options: Server admins can define the settings of the background jobs purging the events whose lifetime has expired under the `purge_jobs` section. - + If no configuration is provided for this option, a single job will be set up to delete expired events in every room daily. @@ -913,7 +915,7 @@ This setting has the following sub-options: range are optional, e.g. a job with no `shortest_max_lifetime` and a `longest_max_lifetime` of '3d' will handle every room with a retention policy whose `max_lifetime` is lower than or equal to three days. - + The rationale for this per-job configuration is that some rooms might have a retention policy with a low `max_lifetime`, where history needs to be purged of outdated messages on a more frequent basis than for the rest of the rooms @@ -942,7 +944,7 @@ retention: - longest_max_lifetime: 3d interval: 12h - shortest_max_lifetime: 3d - interval: 1d + interval: 1d ``` --- ## TLS ## @@ -954,11 +956,11 @@ Options related to TLS. This option specifies a PEM-encoded X509 certificate for TLS. This certificate, as of Synapse 1.0, will need to be a valid and verifiable -certificate, signed by a recognised Certificate Authority. Defaults to none. +certificate, signed by a recognised Certificate Authority. Defaults to none. Be sure to use a `.pem` file that includes the full certificate chain including any intermediate certificates (for instance, if using certbot, use -`fullchain.pem` as your certificate, not `cert.pem`). +`fullchain.pem` as your certificate, not `cert.pem`). Example configuration: ```yaml @@ -967,7 +969,7 @@ tls_certificate_path: "CONFDIR/SERVERNAME.tls.crt" --- ### `tls_private_key_path` -PEM-encoded private key for TLS. Defaults to none. +PEM-encoded private key for TLS. Defaults to none. Example configuration: ```yaml @@ -1124,40 +1126,43 @@ Caching can be configured through the following sub-options: This can also be set by the `SYNAPSE_CACHE_FACTOR` environment variable. Setting by environment variable takes priority over setting through the config file. - + Defaults to 0.5, which will halve the size of all caches. * `per_cache_factors`: A dictionary of cache name to cache factor for that individual cache. Overrides the global cache factor for a given cache. - + These can also be set through environment variables comprised of `SYNAPSE_CACHE_FACTOR_` + the name of the cache in capital letters and underscores. Setting by environment variable takes priority over setting through the config file. Ex. `SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER=2.0` - + Some caches have '*' and other characters that are not alphanumeric or underscores. These caches can be named with or without the special characters stripped. For example, to specify the cache factor for `*stateGroupCache*` via an environment variable would be `SYNAPSE_CACHE_FACTOR_STATEGROUPCACHE=2.0`. - + * `expire_caches`: Controls whether cache entries are evicted after a specified time period. Defaults to true. Set to false to disable this feature. Note that never expiring - caches may result in excessive memory usage. + caches may result in excessive memory usage. * `cache_entry_ttl`: If `expire_caches` is enabled, this flag controls how long an entry can be in a cache without having been accessed before being evicted. - Defaults to 30m. + Defaults to 30m. * `sync_response_cache_duration`: Controls how long the results of a /sync request are cached for after a successful response is returned. 
A higher duration can help clients with intermittent connections, at the cost of higher memory usage. A value of zero means that sync responses are not cached. Defaults to 2m. + + *Changed in Synapse 1.62.0*: The default was changed from 0 to 2m. + * `cache_autotuning` and its sub-options `max_cache_memory_usage`, `target_cache_memory_usage`, and - `min_cache_ttl` work in conjunction with each other to maintain a balance between cache memory - usage and cache entry availability. You must be using [jemalloc](https://github.com/matrix-org/synapse#help-synapse-is-slow-and-eats-all-my-ramcpu) + `min_cache_ttl` work in conjunction with each other to maintain a balance between cache memory + usage and cache entry availability. You must be using [jemalloc](https://github.com/matrix-org/synapse#help-synapse-is-slow-and-eats-all-my-ramcpu) to utilize this option, and all three of the options must be specified for this feature to work. This option defaults to off, enable it by providing values for the sub-options listed below. Please note that the feature will not work and may cause unstable behavior (such as excessive emptying of caches or exceptions) if all of the values are not provided. @@ -1170,7 +1175,7 @@ Caching can be configured through the following sub-options: for this option. * `min_cache_ttl` sets a limit under which newer cache entries are not evicted and is only applied when caches are actively being evicted/`max_cache_memory_usage` has been exceeded. This is to protect hot caches - from being emptied while Synapse is evicting due to memory. There is no default value for this option. + from being emptied while Synapse is evicting due to memory. There is no default value for this option. Example configuration: ```yaml @@ -1194,7 +1199,7 @@ The cache factors (i.e. `caches.global_factor` and `caches.per_cache_factors`) kill -HUP [PID_OF_SYNAPSE_PROCESS] ``` -If you are running multiple workers, you must individually update the worker +If you are running multiple workers, you must individually update the worker config file and send this signal to each worker process. If you're using the [example systemd service](https://github.com/matrix-org/synapse/blob/develop/contrib/systemd/matrix-synapse.service) @@ -1214,7 +1219,7 @@ its data. Associated sub-options: * `name`: this option specifies the database engine to use: either `sqlite3` (for SQLite) - or `psycopg2` (for PostgreSQL). If no name is specified Synapse will default to SQLite. + or `psycopg2` (for PostgreSQL). If no name is specified Synapse will default to SQLite. * `txn_limit` gives the maximum number of transactions to run per connection before reconnecting. Defaults to 0, which means no limit. @@ -1350,7 +1355,7 @@ databases: ``` --- ## Logging ## -Config options related to logging. +Config options related to logging. --- ### `log_config` @@ -1363,7 +1368,7 @@ log_config: "CONFDIR/SERVERNAME.log.config" ``` --- ## Ratelimiting ## -Options related to ratelimiting in Synapse. +Options related to ratelimiting in Synapse. Each ratelimiting configuration is made of two parameters: - `per_second`: number of requests a client can send per second. @@ -1373,7 +1378,7 @@ Each ratelimiting configuration is made of two parameters: Ratelimiting settings for client messaging. - + This is a ratelimiting option for messages that ratelimits sending based on the account the client is using. It defaults to: `per_second: 0.2`, `burst_count: 10`. 
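A rough way to picture how `per_second` and `burst_count` interact: with the defaults above, a client can send an immediate burst of about 10 messages, after which sending is throttled to roughly one message every 5 seconds on average. The sketch below is illustrative only, not Synapse's implementation; it assumes a plain token-bucket model and the class/method names (`TokenBucket`, `allow_request`) are hypothetical.

```python
import time

class TokenBucket:
    """Illustrative token bucket: `burst_count` is the bucket size and
    `per_second` is the refill rate. Not Synapse's actual limiter."""

    def __init__(self, per_second: float, burst_count: int) -> None:
        self.per_second = per_second
        self.burst_count = burst_count
        self.tokens = float(burst_count)  # a fresh client may burst immediately
        self.last = time.monotonic()

    def allow_request(self) -> bool:
        now = time.monotonic()
        # Refill according to elapsed time, capped at the burst size.
        self.tokens = min(
            float(self.burst_count),
            self.tokens + (now - self.last) * self.per_second,
        )
        self.last = now
        if self.tokens >= 1.0:
            self.tokens -= 1.0
            return True
        return False

# With the rc_message defaults (per_second=0.2, burst_count=10) a client can
# send about 10 messages back to back, then roughly one every 5 seconds.
bucket = TokenBucket(per_second=0.2, burst_count=10)
print(sum(bucket.allow_request() for _ in range(20)))  # ~10 allowed immediately
```

The same two-parameter shape applies to every `rc_*` option below; only the defaults differ.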
@@ -1387,7 +1392,7 @@ rc_message: ### `rc_registration` This option ratelimits registration requests based on the client's IP address. -It defaults to `per_second: 0.17`, `burst_count: 3`. +It defaults to `per_second: 0.17`, `burst_count: 3`. Example configuration: ```yaml @@ -1398,7 +1403,7 @@ rc_registration: --- ### `rc_registration_token_validity` -This option checks the validity of registration tokens that ratelimits requests based on +This option checks the validity of registration tokens that ratelimits requests based on the client's IP address. Defaults to `per_second: 0.1`, `burst_count: 5`. @@ -1407,18 +1412,18 @@ Example configuration: rc_registration_token_validity: per_second: 0.3 burst_count: 6 -``` +``` --- ### `rc_login` This option specifies several limits for login: * `address` ratelimits login requests based on the client's IP address. Defaults to `per_second: 0.17`, `burst_count: 3`. - + * `account` ratelimits login requests based on the account the client is attempting to log into. Defaults to `per_second: 0.17`, `burst_count: 3`. - + * `failted_attempts` ratelimits login requests based on the account the client is attempting to log into, based on the amount of failed login attempts for this account. Defaults to `per_second: 0.17`, `burst_count: 3`. @@ -1439,9 +1444,9 @@ rc_login: --- ### `rc_admin_redaction` -This option sets ratelimiting redactions by room admins. If this is not explicitly +This option sets ratelimiting redactions by room admins. If this is not explicitly set then it uses the same ratelimiting as per `rc_message`. This is useful -to allow room admins to deal with abuse quickly. +to allow room admins to deal with abuse quickly. Example configuration: ```yaml @@ -1454,12 +1459,12 @@ rc_admin_redaction: This option allows for ratelimiting number of rooms a user can join. This setting has the following sub-options: -* `local`: ratelimits when users are joining rooms the server is already in. +* `local`: ratelimits when users are joining rooms the server is already in. Defaults to `per_second: 0.1`, `burst_count: 10`. * `remote`: ratelimits when users are trying to join rooms not on the server (which can be more computationally expensive than restricting locally). Defaults to - `per_second: 0.01`, `burst_count: 10` + `per_second: 0.01`, `burst_count: 10` Example configuration: ```yaml @@ -1471,6 +1476,25 @@ rc_joins: per_second: 0.03 burst_count: 12 ``` +--- +### `rc_joins_per_room` + +This option allows admins to ratelimit joins to a room based on the number of recent +joins (local or remote) to that room. It is intended to mitigate mass-join spam +waves which target multiple homeservers. + +By default, one join is permitted to a room every second, with an accumulating +buffer of up to ten instantaneous joins. + +Example configuration (default values): +```yaml +rc_joins_per_room: + per_second: 1 + burst_count: 10 +``` + +_Added in Synapse 1.64.0._ + --- ### `rc_3pid_validation` @@ -1486,9 +1510,9 @@ rc_3pid_validation: --- ### `rc_invites` -This option sets ratelimiting how often invites can be sent in a room or to a +This option sets ratelimiting how often invites can be sent in a room or to a specific user. `per_room` defaults to `per_second: 0.3`, `burst_count: 10` and -`per_user` defaults to `per_second: 0.003`, `burst_count: 5`. +`per_user` defaults to `per_second: 0.003`, `burst_count: 5`. 
Client requests that invite user(s) when [creating a room](https://spec.matrix.org/v1.2/client-server-api/#post_matrixclientv3createroom) @@ -1504,6 +1528,10 @@ The `rc_invites.per_user` limit applies to the *receiver* of the invite, rather sender, meaning that a `rc_invite.per_user.burst_count` of 5 mandates that a single user cannot *receive* more than a burst of 5 invites at a time. +In contrast, the `rc_invites.per_issuer` limit applies to the *issuer* of the invite, meaning that a `rc_invite.per_issuer.burst_count` of 5 mandates that single user cannot *send* more than a burst of 5 invites at a time. + +_Changed in version 1.63:_ added the `per_issuer` limit. + Example configuration: ```yaml rc_invites: @@ -1513,7 +1541,11 @@ rc_invites: per_user: per_second: 0.004 burst_count: 3 + per_issuer: + per_second: 0.5 + burst_count: 5 ``` + --- ### `rc_third_party_invite` @@ -1530,7 +1562,7 @@ rc_third_party_invite: --- ### `rc_federation` -Defines limits on federation requests. +Defines limits on federation requests. The `rc_federation` configuration has the following sub-options: * `window_size`: window size in milliseconds. Defaults to 1000. @@ -1559,7 +1591,7 @@ Sets outgoing federation transaction frequency for sending read-receipts, per-room. If we end up trying to send out more read-receipts, they will get buffered up -into fewer transactions. Defaults to 50. +into fewer transactions. Defaults to 50. Example configuration: ```yaml @@ -1570,9 +1602,9 @@ federation_rr_transactions_per_room_per_second: 40 Config options related to Synapse's media store. --- -### `enable_media_repo` +### `enable_media_repo` -Enable the media store service in the Synapse master. Defaults to true. +Enable the media store service in the Synapse master. Defaults to true. Set to false if you are using a separate media store worker. Example configuration: @@ -1597,7 +1629,7 @@ locations. Defaults to none. Associated sub-options are: * `store_local`: whether to store newly uploaded local files * `store_remote`: whether to store newly downloaded local files * `store_synchronous`: whether to wait for successful storage for local uploads -* `config`: sets a path to the resource through the `directory` option +* `config`: sets a path to the resource through the `directory` option Example configuration: ```yaml @@ -1616,7 +1648,7 @@ The largest allowed upload size in bytes. If you are using a reverse proxy you may also need to set this value in your reverse proxy's config. Defaults to 50M. Notably Nginx has a small max body size by default. -See [here](../../reverse_proxy.md) for more on using a reverse proxy with Synapse. +See [here](../../reverse_proxy.md) for more on using a reverse proxy with Synapse. Example configuration: ```yaml @@ -1638,14 +1670,14 @@ Whether to generate new thumbnails on the fly to precisely match the resolution requested by the client. If true then whenever a new resolution is requested by the client the server will generate a new thumbnail. If false the server will pick a thumbnail -from a precalculated list. Defaults to false. +from a precalculated list. Defaults to false. Example configuration: ```yaml dynamic_thumbnails: true ``` --- -### `thumbnail_sizes` +### `thumbnail_sizes` List of thumbnails to precalculate when an image is uploaded. Associated sub-options are: * `width` @@ -1763,7 +1795,7 @@ This option sets a list of IP address CIDR ranges that the URL preview spider is to access even if they are specified in `url_preview_ip_range_blacklist`. 
This is useful for specifying exceptions to wide-ranging blacklisted target IP ranges - e.g. for enabling URL previews for a specific private -website only visible in your network. Defaults to none. +website only visible in your network. Defaults to none. Example configuration: ```yaml @@ -1781,7 +1813,7 @@ This is more useful if you know there is an entire shape of URL that you know that will never want synapse to try to spider. Each list entry is a dictionary of url component attributes as returned -by urlparse.urlsplit as applied to the absolute form of the URL. See +by urlparse.urlsplit as applied to the absolute form of the URL. See [here](https://docs.python.org/2/library/urlparse.html#urlparse.urlsplit) for more information. Some examples are: @@ -1856,8 +1888,8 @@ Example configuration: oEmbed allows for easier embedding content from a website. It can be used for generating URLs previews of services which support it. A default list of oEmbed providers is included with Synapse. Set `disable_default_providers` to true to disable using -these default oEmbed URLs. Use `additional_providers` to specify additional files with oEmbed configuration (each -should be in the form of providers.json). By default this list is empty. +these default oEmbed URLs. Use `additional_providers` to specify additional files with oEmbed configuration (each +should be in the form of providers.json). By default this list is empty. Example configuration: ```yaml @@ -1874,7 +1906,7 @@ See [here](../../CAPTCHA_SETUP.md) for full details on setting up captcha. --- ### `recaptcha_public_key` -This homeserver's ReCAPTCHA public key. Must be specified if `enable_registration_captcha` is +This homeserver's ReCAPTCHA public key. Must be specified if `enable_registration_captcha` is enabled. Example configuration: @@ -1882,9 +1914,9 @@ Example configuration: recaptcha_public_key: "YOUR_PUBLIC_KEY" ``` --- -### `recaptcha_private_key` +### `recaptcha_private_key` -This homeserver's ReCAPTCHA private key. Must be specified if `enable_registration_captcha` is +This homeserver's ReCAPTCHA private key. Must be specified if `enable_registration_captcha` is enabled. Example configuration: @@ -1895,7 +1927,7 @@ recaptcha_private_key: "YOUR_PRIVATE_KEY" ### `enable_registration_captcha` Set to true to enable ReCaptcha checks when registering, preventing signup -unless a captcha is answered. Requires a valid ReCaptcha public/private key. +unless a captcha is answered. Requires a valid ReCaptcha public/private key. Defaults to false. Example configuration: @@ -1973,7 +2005,7 @@ Registration can be rate-limited using the parameters in the [Ratelimiting](#rat ### `enable_registration` Enable registration for new users. Defaults to false. It is highly recommended that if you enable registration, -you use either captcha, email, or token-based verification to verify that new users are not bots. In order to enable registration +you use either captcha, email, or token-based verification to verify that new users are not bots. In order to enable registration without any verification, you must also set `enable_registration_without_verification` to true. Example configuration: @@ -1997,7 +2029,7 @@ Time that a user's session remains valid for, after they log in. Note that this is not currently compatible with guest logins. 
-Note also that this is calculated at login time: changes are not applied retrospectively to users who have already +Note also that this is calculated at login time: changes are not applied retrospectively to users who have already logged in. By default, this is infinite. @@ -2015,7 +2047,7 @@ For more information about refresh tokens, please see the [manual](user_authenti Note that this only applies to clients which advertise support for refresh tokens. -Note also that this is calculated at login time and refresh time: changes are not applied to +Note also that this is calculated at login time and refresh time: changes are not applied to existing sessions until they are refreshed. By default, this is 5 minutes. @@ -2113,7 +2145,7 @@ Require users to submit a token during registration. Tokens can be managed using the admin [API](../administration/admin_api/registration_tokens.md). Note that `enable_registration` must be set to true. Disabling this option will not delete any tokens previously generated. -Defaults to false. Set to true to enable. +Defaults to false. Set to true to enable. Example configuration: ```yaml @@ -2168,30 +2200,26 @@ default_identity_server: https://matrix.org --- ### `account_threepid_delegates` -Handle threepid (email/phone etc) registration and password resets through a set of -*trusted* identity servers. Note that this allows the configured identity server to -reset passwords for accounts! +Delegate verification of phone numbers to an identity server. -Be aware that if `email` is not set, and SMTP options have not been -configured in the email config block, registration and user password resets via -email will be globally disabled. +When a user wishes to add a phone number to their account, we need to verify that they +actually own that phone number, which requires sending them a text message (SMS). +Currently Synapse does not support sending those texts itself and instead delegates the +task to an identity server. The base URI for the identity server to be used is +specified by the `account_threepid_delegates.msisdn` option. -Additionally, if `msisdn` is not set, registration and password resets via msisdn -will be disabled regardless, and users will not be able to associate an msisdn -identifier to their account. This is due to Synapse currently not supporting -any method of sending SMS messages on its own. +If this is left unspecified, Synapse will not allow users to add phone numbers to +their account. -To enable using an identity server for operations regarding a particular third-party -identifier type, set the value to the URL of that identity server as shown in the -examples below. +(Servers handling the these requests must answer the `/requestToken` endpoints defined +by the Matrix Identity Service API +[specification](https://matrix.org/docs/spec/identity_service/latest).) -Servers handling the these requests must answer the `/requestToken` endpoints defined -by the Matrix Identity Service API [specification](https://matrix.org/docs/spec/identity_service/latest). +*Updated in Synapse 1.64.0*: The `email` option is deprecated. Example configuration: ```yaml account_threepid_delegates: - email: https://example.com # Delegate email sending to example.com msisdn: http://localhost:8090 # Delegate SMS sending to this local process ``` --- @@ -2242,7 +2270,7 @@ By default, any room aliases included in this list will be created as a publicly joinable room when the first user registers for the homeserver. 
If the room already exists, make certain it is a publicly joinable room, i.e. the join rule of the room must be set to 'public'. You can find more options -relating to auto-joining rooms below. +relating to auto-joining rooms below. Example configuration: ```yaml @@ -2296,9 +2324,9 @@ effect if `autocreate_auto_join_rooms` is true. Possible values for this option are: * "public_chat": the room is joinable by anyone, including federated servers if `autocreate_auto_join_rooms_federated` is true (the default). -* "private_chat": an invitation is required to join these rooms. +* "private_chat": an invitation is required to join these rooms. * "trusted_private_chat": an invitation is required to join this room and the invitee is - assigned a power level of 100 upon joining the room. + assigned a power level of 100 upon joining the room. If a value of "private_chat" or "trusted_private_chat" is used then `auto_join_mxid_localpart` must also be configured. @@ -2335,7 +2363,7 @@ auto_join_mxid_localpart: system ``` --- ### `auto_join_rooms_for_guests` - + When `auto_join_rooms` is specified, setting this flag to false prevents guest accounts from being automatically joined to the rooms. @@ -2347,7 +2375,7 @@ auto_join_rooms_for_guests: false ``` --- ### `inhibit_user_in_use_error` - + Whether to inhibit errors raised when registering a new account if the user ID already exists. If turned on, requests to `/register/available` will always show a user ID as available, and Synapse won't raise an error when starting @@ -2367,7 +2395,7 @@ Config options related to metrics. --- ### `enable_metrics` -Set to true to enable collection and rendering of performance metrics. +Set to true to enable collection and rendering of performance metrics. Defaults to false. Example configuration: @@ -2378,11 +2406,11 @@ enable_metrics: true ### `sentry` Use this option to enable sentry integration. Provide the DSN assigned to you by sentry -with the `dsn` setting. +with the `dsn` setting. NOTE: While attempts are made to ensure that the logs don't contain any sensitive information, this cannot be guaranteed. By enabling -this option the sentry server may therefore receive sensitive +this option the sentry server may therefore receive sensitive information, and it in turn may then disseminate sensitive information through insecure notification channels if so configured. @@ -2396,7 +2424,7 @@ sentry: Flags to enable Prometheus metrics which are not suitable to be enabled by default, either for performance reasons or limited use. -Currently the only option is `known_servers`, which publishes +Currently the only option is `known_servers`, which publishes `synapse_federation_known_servers`, a gauge of the number of servers this homeserver knows about, including itself. May cause performance problems on large homeservers. @@ -2440,7 +2468,7 @@ Config settings related to the client/server API ### `room_prejoin_state:` Controls for the state that is shared with users who receive an invite -to a room. By default, the following state event types are shared with users who +to a room. By default, the following state event types are shared with users who receive invites to the room: - m.room.join_rules - m.room.canonical_alias @@ -2451,7 +2479,7 @@ receive invites to the room: - m.room.topic To change the default behavior, use the following sub-options: -* `disable_default_event_types`: set to true to disable the above defaults. If this +* `disable_default_event_types`: set to true to disable the above defaults. 
If this is enabled, only the event types listed in `additional_event_types` are shared. Defaults to false. * `additional_event_types`: Additional state event types to share with users when they are invited @@ -2541,7 +2569,7 @@ Example configuration: ```yaml signing_key_path: "CONFDIR/SERVERNAME.signing.key" ``` ---- +--- ### `old_signing_keys` The keys that the server used to sign messages with but won't use @@ -2593,7 +2621,7 @@ Options for each entry in the list include: If specified, we will check that the response is signed by at least one of the given keys. * `accept_keys_insecurely`: a boolean. Normally, if `verify_keys` is unset, - and `federation_verify_certificates` is not `true`, synapse will refuse + and `federation_verify_certificates` is not `true`, synapse will refuse to start, because this would allow anyone who can spoof DNS responses to masquerade as the trusted key server. If you know what you are doing and are sure that your network environment provides a secure connection @@ -2671,15 +2699,15 @@ This setting has the following sub-options: * `service`: By default, the user has to go to our login page first. If you'd like to allow IdP-initiated login, set `allow_unsolicited` to true under `sp` in the `service` section. -* `config_path`: specify a separate pysaml2 configuration file thusly: +* `config_path`: specify a separate pysaml2 configuration file thusly: `config_path: "CONFDIR/sp_conf.py"` * `saml_session_lifetime`: The lifetime of a SAML session. This defines how long a user has to complete the authentication process, if `allow_unsolicited` is unset. The default is 15 minutes. -* `user_mapping_provider`: Using this option, an external module can be provided as a - custom solution to mapping attributes returned from a saml provider onto a matrix user. The +* `user_mapping_provider`: Using this option, an external module can be provided as a + custom solution to mapping attributes returned from a saml provider onto a matrix user. The `user_mapping_provider` has the following attributes: - * `module`: The custom module's class. - * `config`: Custom configuration values for the module. Use the values provided in the + * `module`: The custom module's class. + * `config`: Custom configuration values for the module. Use the values provided in the example if you are using the built-in user_mapping_provider, or provide your own config values for a custom class if you are using one. This section will be passed as a Python dictionary to the module's `parse_config` method. The built-in provider takes the following two @@ -2696,7 +2724,7 @@ This setting has the following sub-options: MXID was always calculated dynamically rather than stored in a table. For backwards- compatibility, we will look for `user_ids` matching such a pattern before creating a new account. This setting controls the SAML attribute which will be used for this backwards-compatibility lookup. Typically it should be 'uid', but if the attribute maps are changed, it may be necessary to change it. - The default is 'uid'. + The default is 'uid'. * `attribute_requirements`: It is possible to configure Synapse to only allow logins if SAML attributes match particular values. The requirements can be listed under `attribute_requirements` as shown in the example. All of the listed attributes must @@ -2704,7 +2732,7 @@ This setting has the following sub-options: * `idp_entityid`: If the metadata XML contains multiple IdP entities then the `idp_entityid` option must be set to the entity to redirect users to. 
Most deployments only have a single IdP entity and so should omit this option. - + Once SAML support is enabled, a metadata file will be exposed at `https://:/_synapse/client/saml2/metadata.xml`, which you may be able to @@ -2765,16 +2793,16 @@ saml2_config: sur_name: "the Sysadmin" email_address": ["admin@example.com"] contact_type": technical - + saml_session_lifetime: 5m - + user_mapping_provider: - # Below options are intended for the built-in provider, they should be - # changed if using a custom module. + # Below options are intended for the built-in provider, they should be + # changed if using a custom module. config: mxid_source_attribute: displayName mxid_mapping: dotreplace - + grandfathered_mxid_source_attribute: upn attribute_requirements: @@ -2902,7 +2930,7 @@ Options for each entry include: * `localpart_template`: Jinja2 template for the localpart of the MXID. If this is not set, the user will be prompted to choose their - own username (see the documentation for the `sso_auth_account_details.html` + own username (see the documentation for the `sso_auth_account_details.html` template). This template can use the `localpart_from_email` filter. * `confirm_localpart`: Whether to prompt the user to validate (or @@ -2915,7 +2943,7 @@ Options for each entry include: * `email_template`: Jinja2 template for the email address of the user. If unset, no email address will be added to the account. - + * `extra_attributes`: a map of Jinja2 templates for extra attributes to send back to the client during login. Note that these are non-standard and clients will ignore them without modifications. @@ -2925,7 +2953,7 @@ Options for each entry include: in the ID Token. -It is possible to configure Synapse to only allow logins if certain attributes +It is possible to configure Synapse to only allow logins if certain attributes match particular values in the OIDC userinfo. The requirements can be listed under `attribute_requirements` as shown here: ```yaml @@ -2940,7 +2968,7 @@ userinfo by expanding the `scopes` section of the OIDC config to retrieve additional information from the OIDC provider. If the OIDC claim is a list, then the attribute must match any value in the list. -Otherwise, it must exactly match the value of the claim. Using the example +Otherwise, it must exactly match the value of the claim. Using the example above, the `family_name` claim MUST be "Stephensson", but the `groups` claim MUST contain "admin". @@ -3005,7 +3033,7 @@ cas_config: Additional settings to use with single-sign on systems such as OpenID Connect, SAML2 and CAS. -Server admins can configure custom templates for pages related to SSO. See +Server admins can configure custom templates for pages related to SSO. See [here](../../templates.md) for more information. Options include: @@ -3021,7 +3049,7 @@ Options include: required login flows) is whitelisted in addition to any URLs in this list. By default, this list contains only the login fallback page. * `update_profile_information`: Use this setting to keep a user's profile fields in sync with information from - the identity provider. Currently only syncing the displayname is supported. Fields + the identity provider. Currently only syncing the displayname is supported. Fields are checked on every SSO login, and are updated if necessary. 
Note that enabling this option will override user profile information, regardless of whether users have opted-out of syncing that @@ -3065,7 +3093,7 @@ Additional sub-options for this setting include: Required if `enabled` is set to true. * `subject_claim`: Name of the claim containing a unique identifier for the user. Optional, defaults to `sub`. -* `issuer`: The issuer to validate the "iss" claim against. Optional. If provided the +* `issuer`: The issuer to validate the "iss" claim against. Optional. If provided the "iss" claim will be required and validated for all JSON web tokens. * `audiences`: A list of audiences to validate the "aud" claim against. Optional. If provided the "aud" claim will be required and validated for all JSON web tokens. @@ -3075,7 +3103,7 @@ Additional sub-options for this setting include: Example configuration: ```yaml jwt_config: - enabled: true + enabled: true secret: "provided-by-your-issuer" algorithm: "provided-by-your-issuer" subject_claim: "name_of_claim" @@ -3086,7 +3114,7 @@ jwt_config: --- ### `password_config` -Use this setting to enable password-based logins. +Use this setting to enable password-based logins. This setting has the following sub-options: * `enabled`: Defaults to true. @@ -3095,10 +3123,10 @@ This setting has the following sub-options: to log in and reauthenticate, whilst preventing new users from setting passwords. * `localdb_enabled`: Set to false to disable authentication against the local password database. This is ignored if `enabled` is false, and is only useful - if you have other `password_providers`. Defaults to true. + if you have other `password_providers`. Defaults to true. * `pepper`: Set the value here to a secret random string for extra security. DO NOT CHANGE THIS AFTER INITIAL SETUP! -* `policy`: Define and enforce a password policy, such as minimum lengths for passwords, etc. +* `policy`: Define and enforce a password policy, such as minimum lengths for passwords, etc. Each parameter is optional. This is an implementation of MSC2000. Parameters are as follows: * `enabled`: Defaults to false. Set to true to enable. * `minimum_length`: Minimum accepted length for a password. Defaults to 0. @@ -3110,7 +3138,7 @@ This setting has the following sub-options: Defaults to false. * `require_uppercase`: Whether a password must contain at least one uppercase letter. Defaults to false. - + Example configuration: ```yaml @@ -3132,7 +3160,7 @@ password_config: The amount of time to allow a user-interactive authentication session to be active. -This defaults to 0, meaning the user is queried for their credentials +This defaults to 0, meaning the user is queried for their credentials before every action, but this can be overridden to allow a single validation to be re-used. This weakens the protections afforded by the user-interactive authentication process, by allowing for multiple @@ -3159,9 +3187,17 @@ Server admins can configure custom templates for email content. See This setting has the following sub-options: * `smtp_host`: The hostname of the outgoing SMTP server to use. Defaults to 'localhost'. -* `smtp_port`: The port on the mail server for outgoing SMTP. Defaults to 25. +* `smtp_port`: The port on the mail server for outgoing SMTP. Defaults to 465 if `force_tls` is true, else 25. + + _Changed in Synapse 1.64.0:_ the default port is now aware of `force_tls`. * `smtp_user` and `smtp_pass`: Username/password for authentication to the SMTP server. By default, no authentication is attempted. 
+* `force_tls`: By default, Synapse connects over plain text and then optionally upgrades + to TLS via STARTTLS. If this option is set to true, TLS is used from the start (Implicit TLS), + and the option `require_transport_security` is ignored. + It is recommended to enable this if supported by your mail server. + + _New in Synapse 1.64.0._ * `require_transport_security`: Set to true to require TLS transport security for SMTP. By default, Synapse will connect over plain text, and will then switch to TLS via STARTTLS *if the SMTP server supports it*. If this option is set, @@ -3195,8 +3231,8 @@ This setting has the following sub-options: message(s) have been sent to, e.g. "My super room". In addition, emails related to account administration will can use the '%(server_name)s' placeholder, which will be replaced by the value of the `server_name` setting in your Synapse configuration. - - Here is a list of subjects for notification emails that can be set: + + Here is a list of subjects for notification emails that can be set: * `message_from_person_in_room`: Subject to use to notify about one message from one or more user(s) in a room which has a name. Defaults to "[%(app)s] You have a message on %(app)s from %(person)s in the %(room)s room..." * `message_from_person`: Subject to use to notify about one message from one or more user(s) in a @@ -3205,13 +3241,13 @@ This setting has the following sub-options: a room which doesn't have a name. Defaults to "[%(app)s] You have messages on %(app)s from %(person)s..." * `messages_in_room`: Subject to use to notify about multiple messages in a room which has a name. Defaults to "[%(app)s] You have messages on %(app)s in the %(room)s room..." - * `messages_in_room_and_others`: Subject to use to notify about multiple messages in multiple rooms. + * `messages_in_room_and_others`: Subject to use to notify about multiple messages in multiple rooms. Defaults to "[%(app)s] You have messages on %(app)s in the %(room)s room and others..." * `messages_from_person_and_others`: Subject to use to notify about multiple messages from multiple persons in multiple rooms. This is similar to the setting above except it's used when - the room in which the notification was triggered has no name. Defaults to + the room in which the notification was triggered has no name. Defaults to "[%(app)s] You have messages on %(app)s from %(person)s and others..." - * `invite_from_person_to_room`: Subject to use to notify about an invite to a room which has a name. + * `invite_from_person_to_room`: Subject to use to notify about an invite to a room which has a name. Defaults to "[%(app)s] %(person)s has invited you to join the %(room)s room on %(app)s..." * `invite_from_person`: Subject to use to notify about an invite to a room which doesn't have a name. Defaults to "[%(app)s] %(person)s has invited you to chat on %(app)s..." @@ -3226,6 +3262,7 @@ email: smtp_port: 587 smtp_user: "exampleusername" smtp_pass: "examplepassword" + force_tls: true require_transport_security: true enable_tls: false notif_from: "Your Friendly %(app)s homeserver " @@ -3255,7 +3292,7 @@ Configuration settings related to push notifications --- ### `push` -This setting defines options for push notifications. +This setting defines options for push notifications. This option has a number of sub-options. They are as follows: * `include_content`: Clients requesting push notifications can either have the body of @@ -3270,7 +3307,7 @@ This option has a number of sub-options. 
They are as follows: notification saying only that a message arrived and who it came from. Defaults to true. Set to false to only include the event ID and room ID in push notification payloads. * `group_unread_count_by_room: false`: When a push notification is received, an unread count is also sent. - This number can either be calculated as the number of unread messages for the user, or the number of *rooms* the + This number can either be calculated as the number of unread messages for the user, or the number of *rooms* the user has unread messages in. Defaults to true, meaning push clients will see the number of rooms with unread messages in them. Set to false to instead send the number of unread messages. @@ -3310,7 +3347,7 @@ encryption_enabled_by_default_for_room_type: invite --- ### `user_directory` -This setting defines options related to the user directory. +This setting defines options related to the user directory. This option has the following sub-options: * `enabled`: Defines whether users can search the user directory. If false then @@ -3328,7 +3365,7 @@ This option has the following sub-options: Set to true to return search results containing all known users, even if that user does not share a room with the requester. * `prefer_local_users`: Defines whether to prefer local users in search query results. - If set to true, local users are more likely to appear above remote users when searching the + If set to true, local users are more likely to appear above remote users when searching the user directory. Defaults to false. Example configuration: @@ -3393,15 +3430,15 @@ user_consent: ### `stats` Settings for local room and user statistics collection. See [here](../../room_and_user_statistics.md) -for more. +for more. * `enabled`: Set to false to disable room and user statistics. Note that doing so may cause certain features (such as the room directory) not to work - correctly. Defaults to true. + correctly. Defaults to true. Example configuration: ```yaml -stats: +stats: enabled: false ``` --- @@ -3433,7 +3470,7 @@ server_notices: Set to false to disable searching the public room list. When disabled blocks searching local and remote room lists for local and remote -users by always returning an empty list for all queries. Defaults to true. +users by always returning an empty list for all queries. Defaults to true. Example configuration: ```yaml @@ -3459,7 +3496,7 @@ Options for the rules include: * `user_id`: Matches against the creator of the alias. Defaults to "*". * `alias`: Matches against the alias being created. Defaults to "*". * `room_id`: Matches against the room ID the alias is being pointed at. Defaults to "*" -* `action`: Whether to "allow" or "deny" the request if the rule matches. Defaults to allow. +* `action`: Whether to "allow" or "deny" the request if the rule matches. Defaults to allow. Example configuration: ```yaml @@ -3489,7 +3526,7 @@ Options for the rules include: * `user_id`: Matches against the creator of the alias. Defaults to "*". * `alias`: Matches against any current local or canonical aliases associated with the room. Defaults to "*". * `room_id`: Matches against the room ID being published. Defaults to "*". -* `action`: Whether to "allow" or "deny" the request if the rule matches. Defaults to allow. +* `action`: Whether to "allow" or "deny" the request if the rule matches. Defaults to allow. 
Example configuration: ```yaml @@ -3541,14 +3578,14 @@ synapse or any other services which support opentracing Sub-options include: * `enabled`: whether tracing is enabled. Set to true to enable. Disabled by default. * `homeserver_whitelist`: The list of homeservers we wish to send and receive span contexts and span baggage. - See [here](../../opentracing.md) for more. + See [here](../../opentracing.md) for more. This is a list of regexes which are matched against the `server_name` of the homeserver. By default, it is empty, so no servers are matched. * `force_tracing_for_users`: # A list of the matrix IDs of users whose requests will always be traced, even if the tracing system would otherwise drop the traces due to probabilistic sampling. By default, the list is empty. * `jaeger_config`: Jaeger can be configured to sample traces at different rates. - All configuration options provided by Jaeger can be set here. Jaeger's configuration is + All configuration options provided by Jaeger can be set here. Jaeger's configuration is mostly related to trace sampling which is documented [here](https://www.jaegertracing.io/docs/latest/sampling/). Example configuration: @@ -3576,7 +3613,7 @@ Configuration options related to workers. ### `send_federation` Controls sending of outbound federation transactions on the main process. -Set to false if using a federation sender worker. Defaults to true. +Set to false if using a federation sender worker. Defaults to true. Example configuration: ```yaml @@ -3586,12 +3623,12 @@ send_federation: false ### `federation_sender_instances` It is possible to run multiple federation sender workers, in which case the -work is balanced across them. Use this setting to list the senders. +work is balanced across them. Use this setting to list the senders. This configuration setting must be shared between all federation sender workers, and if changed all federation sender workers must be stopped at the same time and then started, to ensure that all instances are running with the same config (otherwise -events may be dropped). +events may be dropped). Example configuration: ```yaml @@ -3602,7 +3639,7 @@ federation_sender_instances: ### `instance_map` When using workers this should be a map from worker name to the -HTTP replication listener of the worker, if configured. +HTTP replication listener of the worker, if configured. Example configuration: ```yaml @@ -3651,7 +3688,7 @@ worker_replication_secret: "secret_secret" Configuration for Redis when using workers. This *must* be enabled when using workers (unless using old style direct TCP configuration). This setting has the following sub-options: -* `enabled`: whether to use Redis support. Defaults to false. +* `enabled`: whether to use Redis support. Defaults to false. * `host` and `port`: Optional host and port to use to connect to redis. Defaults to localhost and 6379 * `password`: Optional password if configured on the Redis instance. @@ -3665,7 +3702,7 @@ redis: password: ``` ## Background Updates ## -Configuration settings related to background updates. +Configuration settings related to background updates. --- ### `background_updates` @@ -3674,7 +3711,7 @@ Background updates are database updates that are run in the background in batche The duration, minimum batch size, default batch size, whether to sleep between batches and if so, how long to sleep can all be configured. This is helpful to speed up or slow down the updates. 
This setting has the following sub-options: -* `background_update_duration_ms`: How long in milliseconds to run a batch of background updates for. Defaults to 100. +* `background_update_duration_ms`: How long in milliseconds to run a batch of background updates for. Defaults to 100. Set a different time to change the default. * `sleep_enabled`: Whether to sleep between updates. Defaults to true. Set to false to change the default. * `sleep_duration_ms`: If sleeping between updates, how long in milliseconds to sleep for. Defaults to 1000. @@ -3684,7 +3721,7 @@ This setting has the following sub-options: * `default_batch_size`: The batch size to use for the first iteration of a new background update. The default is 100. Set a size to change the default. -Example configuration: +Example configuration: ```yaml background_updates: background_update_duration_ms: 500 diff --git a/mypy.ini b/mypy.ini index ea0ab003a85f..6add272990ed 100644 --- a/mypy.ini +++ b/mypy.ini @@ -84,9 +84,6 @@ disallow_untyped_defs = False [mypy-synapse.http.matrixfederationclient] disallow_untyped_defs = False -[mypy-synapse.logging.opentracing] -disallow_untyped_defs = False - [mypy-synapse.metrics._reactor_metrics] disallow_untyped_defs = False # This module imports select.epoll. That exists on Linux, but doesn't on macOS. diff --git a/poetry.lock b/poetry.lock index b7c0a6869a4e..b62c24ae1672 100644 --- a/poetry.lock +++ b/poetry.lock @@ -290,7 +290,7 @@ importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} [[package]] name = "frozendict" -version = "2.3.0" +version = "2.3.3" description = "A simple immutable dictionary" category = "main" optional = false @@ -1563,7 +1563,7 @@ url_preview = ["lxml"] [metadata] lock-version = "1.1" python-versions = "^3.7.1" -content-hash = "e96625923122e29b6ea5964379828e321b6cede2b020fc32c6f86c09d86d1ae8" +content-hash = "c24bbcee7e86dbbe7cdbf49f91a25b310bf21095452641e7440129f59b077f78" [metadata.files] attrs = [ @@ -1753,23 +1753,23 @@ flake8-comprehensions = [ {file = "flake8_comprehensions-3.8.0-py3-none-any.whl", hash = "sha256:9406314803abe1193c064544ab14fdc43c58424c0882f6ff8a581eb73fc9bb58"}, ] frozendict = [ - {file = "frozendict-2.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e18e2abd144a9433b0a8334582843b2aa0d3b9ac8b209aaa912ad365115fe2e1"}, - {file = "frozendict-2.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96dc7a02e78da5725e5e642269bb7ae792e0c9f13f10f2e02689175ebbfedb35"}, - {file = "frozendict-2.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:752a6dcfaf9bb20a7ecab24980e4dbe041f154509c989207caf185522ef85461"}, - {file = "frozendict-2.3.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:5346d9fc1c936c76d33975a9a9f1a067342963105d9a403a99e787c939cc2bb2"}, - {file = "frozendict-2.3.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60dd2253f1bacb63a7c486ec541a968af4f985ffb06602ee8954a3d39ec6bd2e"}, - {file = "frozendict-2.3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:b2e044602ce17e5cd86724add46660fb9d80169545164e763300a3b839cb1b79"}, - {file = "frozendict-2.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a27a69b1ac3591e4258325108aee62b53c0eeb6ad0a993ae68d3c7eaea980420"}, - {file = "frozendict-2.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f45ef5f6b184d84744fff97b61f6b9a855e24d36b713ea2352fc723a047afa5"}, - {file = "frozendict-2.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:2d3f5016650c0e9a192f5024e68fb4d63f670d0ee58b099ed3f5b4c62ea30ecb"}, - 
{file = "frozendict-2.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6cf605916f50aabaaba5624c81eb270200f6c2c466c46960237a125ec8fe3ae0"}, - {file = "frozendict-2.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6da06e44904beae4412199d7e49be4f85c6cc168ab06b77c735ea7da5ce3454"}, - {file = "frozendict-2.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:1f34793fb409c4fa70ffd25bea87b01f3bd305fb1c6b09e7dff085b126302206"}, - {file = "frozendict-2.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fd72494a559bdcd28aa71f4aa81860269cd0b7c45fff3e2614a0a053ecfd2a13"}, - {file = "frozendict-2.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00ea9166aa68cc5feed05986206fdbf35e838a09cb3feef998cf35978ff8a803"}, - {file = "frozendict-2.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:9ffaf440648b44e0bc694c1a4701801941378ba3ba6541e17750ae4b4aeeb116"}, - {file = "frozendict-2.3.0-py3-none-any.whl", hash = "sha256:8578fe06815fcdcc672bd5603eebc98361a5317c1c3a13b28c6c810f6ea3b323"}, - {file = "frozendict-2.3.0.tar.gz", hash = "sha256:da4231adefc5928e7810da2732269d3ad7b5616295b3e693746392a8205ea0b5"}, + {file = "frozendict-2.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39942914c1217a5a49c7551495a103b3dbd216e19413687e003b859c6b0ebc12"}, + {file = "frozendict-2.3.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5589256058b31f2b91419fa30b8dc62dbdefe7710e688a3fd5b43849161eecc9"}, + {file = "frozendict-2.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:35eb7e59e287c41f4f712d4d3d2333354175b155d217b97c99c201d2d8920790"}, + {file = "frozendict-2.3.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:310aaf81793abf4f471895e6fe65e0e74a28a2aaf7b25c2ba6ccd4e35af06842"}, + {file = "frozendict-2.3.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c353c11010a986566a0cb37f9a783c560ffff7d67d5e7fd52221fb03757cdc43"}, + {file = "frozendict-2.3.3-cp36-cp36m-win_amd64.whl", hash = "sha256:15b5f82aad108125336593cec1b6420c638bf45f449c57e50949fc7654ea5a41"}, + {file = "frozendict-2.3.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a4737e5257756bd6b877504ff50185b705db577b5330d53040a6cf6417bb3cdb"}, + {file = "frozendict-2.3.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80a14c11e33e8b0bc09e07bba3732c77a502c39edb8c3959fd9a0e490e031158"}, + {file = "frozendict-2.3.3-cp37-cp37m-win_amd64.whl", hash = "sha256:027952d1698ac9c766ef43711226b178cdd49d2acbdff396936639ad1d2a5615"}, + {file = "frozendict-2.3.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ef818d66c85098a37cf42509545a4ba7dd0c4c679d6262123a8dc14cc474bab7"}, + {file = "frozendict-2.3.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:812279f2b270c980112dc4e367b168054f937108f8044eced4199e0ab2945a37"}, + {file = "frozendict-2.3.3-cp38-cp38-win_amd64.whl", hash = "sha256:c1fb7efbfebc2075f781be3d9774e4ba6ce4fc399148b02097f68d4b3c4bc00a"}, + {file = "frozendict-2.3.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a0b46d4bf95bce843c0151959d54c3e5b8d0ce29cb44794e820b3ec980d63eee"}, + {file = "frozendict-2.3.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38c4660f37fcc70a32ff997fe58e40b3fcc60b2017b286e33828efaa16b01308"}, + {file = "frozendict-2.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:919e3609844fece11ab18bcbf28a3ed20f8108ad4149d7927d413687f281c6c9"}, + {file = "frozendict-2.3.3-py3-none-any.whl", hash = 
"sha256:f988b482d08972a196664718167a993a61c9e9f6fe7b0ca2443570b5f20ca44a"}, + {file = "frozendict-2.3.3.tar.gz", hash = "sha256:398539c52af3c647d103185bbaa1291679f0507ad035fe3bab2a8b0366d52cf1"}, ] gitdb = [ {file = "gitdb-4.0.9-py3-none-any.whl", hash = "sha256:8033ad4e853066ba6ca92050b9df2f89301b8fc8bf7e9324d412a63f8bf1a8fd"}, diff --git a/pyproject.toml b/pyproject.toml index 74a473bd34c6..af7def0c5308 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,7 @@ skip_gitignore = true [tool.poetry] name = "matrix-synapse" -version = "1.63.0" +version = "1.64.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" @@ -110,7 +110,9 @@ jsonschema = ">=3.0.0" frozendict = ">=1,!=2.1.2" # We require 2.1.0 or higher for type hints. Previous guard was >= 1.1.0 unpaddedbase64 = ">=2.1.0" -canonicaljson = "^1.4.0" +# We require 1.5.0 to work around an issue when running against the C implementation of +# frozendict: https://github.com/matrix-org/python-canonicaljson/issues/36 +canonicaljson = "^1.5.0" # we use the type definitions added in signedjson 1.1. signedjson = "^1.1.0" # validating SSL certs for IP addresses requires service_identity 18.1. diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py index 38564893e95b..cd2e64b75f9d 100755 --- a/scripts-dev/build_debian_packages.py +++ b/scripts-dev/build_debian_packages.py @@ -26,7 +26,6 @@ "debian:bookworm", "debian:sid", "ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14) - "ubuntu:impish", # 21.10 (EOL 2022-07) "ubuntu:jammy", # 22.04 LTS (EOL 2027-04) ) diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index 26834a437e9c..543bba27c29e 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -166,22 +166,6 @@ "ui_auth_sessions", "ui_auth_sessions_credentials", "ui_auth_sessions_ips", - # Groups/communities is no longer supported. - "group_attestations_remote", - "group_attestations_renewals", - "group_invites", - "group_roles", - "group_room_categories", - "group_rooms", - "group_summary_roles", - "group_summary_room_categories", - "group_summary_rooms", - "group_summary_users", - "group_users", - "groups", - "local_group_membership", - "local_group_updates", - "remote_profile_cache", } diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py index 54d13026c9e5..f43965c1c837 100644 --- a/synapse/api/ratelimiting.py +++ b/synapse/api/ratelimiting.py @@ -27,6 +27,33 @@ class Ratelimiter: """ Ratelimit actions marked by arbitrary keys. + (Note that the source code speaks of "actions" and "burst_count" rather than + "tokens" and a "bucket_size".) + + This is a "leaky bucket as a meter". For each key to be tracked there is a bucket + containing some number 0 <= T <= `burst_count` of tokens corresponding to previously + permitted requests for that key. Each bucket starts empty, and gradually leaks + tokens at a rate of `rate_hz`. + + Upon an incoming request, we must determine: + - the key that this request falls under (which bucket to inspect), and + - the cost C of this request in tokens. + Then, if there is room in the bucket for C tokens (T + C <= `burst_count`), + the request is permitted and `cost` tokens are added to the bucket. + Otherwise the request is denied, and the bucket continues to hold T tokens. 
+ + This means that the limiter enforces an average request frequency of `rate_hz`, + while accumulating a buffer of up to `burst_count` requests which can be consumed + instantaneously. + + The tricky bit is the leaking. We do not want to have a periodic process which + leaks every bucket! Instead, we track + - the time point when the bucket was last completely empty, and + - how many tokens have been added to the bucket since then. + Then for each incoming request, we can calculate how many tokens have leaked + since this time point, and use that to decide if we should accept or reject the + request. + Args: clock: A homeserver clock, for retrieving the current time rate_hz: The long term number of actions that can be performed in a second. @@ -41,14 +68,30 @@ def __init__( self.burst_count = burst_count self.store = store - # A ordered dictionary keeping track of actions, when they were last - # performed and how often. Each entry is a mapping from a key of arbitrary type - # to a tuple representing: - # * How many times an action has occurred since a point in time - # * The point in time - # * The rate_hz of this particular entry. This can vary per request + # An ordered dictionary representing the token buckets tracked by this rate + # limiter. Each entry maps a key of arbitrary type to a tuple representing: + # * The number of tokens currently in the bucket, + # * The time point when the bucket was last completely empty, and + # * The rate_hz (leak rate) of this particular bucket. self.actions: OrderedDict[Hashable, Tuple[float, float, float]] = OrderedDict() + def _get_key( + self, requester: Optional[Requester], key: Optional[Hashable] + ) -> Hashable: + """Use the requester's MXID as a fallback key if no key is provided.""" + if key is None: + if not requester: + raise ValueError("Must supply at least one of `requester` or `key`") + + key = requester.user.to_string() + return key + + def _get_action_counts( + self, key: Hashable, time_now_s: float + ) -> Tuple[float, float, float]: + """Retrieve the action counts, with a fallback representing an empty bucket.""" + return self.actions.get(key, (0.0, time_now_s, 0.0)) + async def can_do_action( self, requester: Optional[Requester], @@ -88,11 +131,7 @@ async def can_do_action( * The reactor timestamp for when the action can be performed next. -1 if rate_hz is less than or equal to zero """ - if key is None: - if not requester: - raise ValueError("Must supply at least one of `requester` or `key`") - - key = requester.user.to_string() + key = self._get_key(requester, key) if requester: # Disable rate limiting of users belonging to any AS that is configured @@ -121,7 +160,7 @@ async def can_do_action( self._prune_message_counts(time_now_s) # Check if there is an existing count entry for this key - action_count, time_start, _ = self.actions.get(key, (0.0, time_now_s, 0.0)) + action_count, time_start, _ = self._get_action_counts(key, time_now_s) # Check whether performing another action is allowed time_delta = time_now_s - time_start @@ -164,6 +203,37 @@ async def can_do_action( return allowed, time_allowed + def record_action( + self, + requester: Optional[Requester], + key: Optional[Hashable] = None, + n_actions: int = 1, + _time_now_s: Optional[float] = None, + ) -> None: + """Record that one or more actions took place, even if they violate the rate limit. + + This is useful for tracking the frequency of events that happen across + federation which we still want to rate limit locally.
For instance, if + we are alice.com monitoring a particular room, we cannot prevent bob.com + from joining users to that room. However, we can track the number of recent + joins in the room and refuse to serve new joins ourselves if there have been too + many in the room across both homeservers. + + Args: + requester: The requester that is doing the action, if any. + key: An arbitrary key used to classify an action. Defaults to the + requester's user ID. + n_actions: The number of times the user wants to do this action. If the user + cannot do all of the actions, the user's action count is not incremented + at all. + _time_now_s: The current time. Optional, defaults to the current time according + to self.clock. Only used by tests. + """ + key = self._get_key(requester, key) + time_now_s = _time_now_s if _time_now_s is not None else self.clock.time() + action_count, time_start, rate_hz = self._get_action_counts(key, time_now_s) + self.actions[key] = (action_count + n_actions, time_start, rate_hz) + def _prune_message_counts(self, time_now_s: float) -> None: """Remove message count entries that have not exceeded their defined rate_hz limit diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index 3f85d61b4633..00e81b3afc5e 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -84,6 +84,8 @@ class RoomVersion: # MSC3787: Adds support for a `knock_restricted` join rule, mixing concepts of # knocks and restricted join rules into the same join condition. msc3787_knock_restricted_join_rule: bool + # MSC3667: Enforce integer power levels + msc3667_int_only_power_levels: bool class RoomVersions: @@ -103,6 +105,7 @@ class RoomVersions: msc2716_historical=False, msc2716_redactions=False, msc3787_knock_restricted_join_rule=False, + msc3667_int_only_power_levels=False, ) V2 = RoomVersion( "2", @@ -120,6 +123,7 @@ class RoomVersions: msc2716_historical=False, msc2716_redactions=False, msc3787_knock_restricted_join_rule=False, + msc3667_int_only_power_levels=False, ) V3 = RoomVersion( "3", @@ -137,6 +141,7 @@ class RoomVersions: msc2716_historical=False, msc2716_redactions=False, msc3787_knock_restricted_join_rule=False, + msc3667_int_only_power_levels=False, ) V4 = RoomVersion( "4", @@ -154,6 +159,7 @@ class RoomVersions: msc2716_historical=False, msc2716_redactions=False, msc3787_knock_restricted_join_rule=False, + msc3667_int_only_power_levels=False, ) V5 = RoomVersion( "5", @@ -171,6 +177,7 @@ class RoomVersions: msc2716_historical=False, msc2716_redactions=False, msc3787_knock_restricted_join_rule=False, + msc3667_int_only_power_levels=False, ) V6 = RoomVersion( "6", @@ -188,6 +195,7 @@ class RoomVersions: msc2716_historical=False, msc2716_redactions=False, msc3787_knock_restricted_join_rule=False, + msc3667_int_only_power_levels=False, ) MSC2176 = RoomVersion( "org.matrix.msc2176", @@ -205,6 +213,7 @@ class RoomVersions: msc2716_historical=False, msc2716_redactions=False, msc3787_knock_restricted_join_rule=False, + msc3667_int_only_power_levels=False, ) V7 = RoomVersion( "7", @@ -222,6 +231,7 @@ class RoomVersions: msc2716_historical=False, msc2716_redactions=False, msc3787_knock_restricted_join_rule=False, + msc3667_int_only_power_levels=False, ) V8 = RoomVersion( "8", @@ -239,6 +249,7 @@ class RoomVersions: msc2716_historical=False, msc2716_redactions=False, msc3787_knock_restricted_join_rule=False, + msc3667_int_only_power_levels=False, ) V9 = RoomVersion( "9", @@ -256,6 +267,7 @@ class RoomVersions: msc2716_historical=False, 
msc2716_redactions=False, msc3787_knock_restricted_join_rule=False, + msc3667_int_only_power_levels=False, ) MSC2716v3 = RoomVersion( "org.matrix.msc2716v3", @@ -273,6 +285,7 @@ class RoomVersions: msc2716_historical=True, msc2716_redactions=True, msc3787_knock_restricted_join_rule=False, + msc3667_int_only_power_levels=False, ) MSC3787 = RoomVersion( "org.matrix.msc3787", @@ -290,6 +303,25 @@ class RoomVersions: msc2716_historical=False, msc2716_redactions=False, msc3787_knock_restricted_join_rule=True, + msc3667_int_only_power_levels=False, + ) + V10 = RoomVersion( + "10", + RoomDisposition.STABLE, + EventFormatVersions.V3, + StateResolutionVersions.V2, + enforce_key_validity=True, + special_case_aliases_auth=False, + strict_canonicaljson=True, + limit_notifications_power_levels=True, + msc2176_redaction_rules=False, + msc3083_join_rules=True, + msc3375_redaction_rules=True, + msc2403_knocking=True, + msc2716_historical=False, + msc2716_redactions=False, + msc3787_knock_restricted_join_rule=True, + msc3667_int_only_power_levels=True, ) @@ -308,6 +340,7 @@ class RoomVersions: RoomVersions.V9, RoomVersions.MSC2716v3, RoomVersions.MSC3787, + RoomVersions.V10, ) } diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index 87f82bd9a5c7..8a583d3ec632 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -28,19 +28,22 @@ from synapse.config.logger import setup_logging from synapse.events import EventBase from synapse.handlers.admin import ExfiltrationWriter -from synapse.replication.slave.storage._base import BaseSlavedStore -from synapse.replication.slave.storage.account_data import SlavedAccountDataStore -from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore -from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore from synapse.replication.slave.storage.devices import SlavedDeviceStore from synapse.replication.slave.storage.events import SlavedEventStore from synapse.replication.slave.storage.filtering import SlavedFilteringStore from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore -from synapse.replication.slave.storage.receipts import SlavedReceiptsStore -from synapse.replication.slave.storage.registration import SlavedRegistrationStore from synapse.server import HomeServer from synapse.storage.database import DatabasePool, LoggingDatabaseConnection +from synapse.storage.databases.main.account_data import AccountDataWorkerStore +from synapse.storage.databases.main.appservice import ( + ApplicationServiceTransactionWorkerStore, + ApplicationServiceWorkerStore, +) +from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore +from synapse.storage.databases.main.receipts import ReceiptsWorkerStore +from synapse.storage.databases.main.registration import RegistrationWorkerStore from synapse.storage.databases.main.room import RoomWorkerStore +from synapse.storage.databases.main.tags import TagsWorkerStore from synapse.types import StateMap from synapse.util import SYNAPSE_VERSION from synapse.util.logcontext import LoggingContext @@ -49,16 +52,17 @@ class AdminCmdSlavedStore( - SlavedReceiptsStore, - SlavedAccountDataStore, - SlavedApplicationServiceStore, - SlavedRegistrationStore, SlavedFilteringStore, - SlavedDeviceInboxStore, SlavedDeviceStore, SlavedPushRuleStore, SlavedEventStore, - BaseSlavedStore, + TagsWorkerStore, + DeviceInboxWorkerStore, + AccountDataWorkerStore, + ApplicationServiceTransactionWorkerStore, + ApplicationServiceWorkerStore, + 
RegistrationWorkerStore, + ReceiptsWorkerStore, RoomWorkerStore, ): def __init__( diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 4a987fb759ea..42d1f6d21977 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -48,20 +48,12 @@ from synapse.logging.context import LoggingContext from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource -from synapse.replication.slave.storage._base import BaseSlavedStore -from synapse.replication.slave.storage.account_data import SlavedAccountDataStore -from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore -from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore from synapse.replication.slave.storage.devices import SlavedDeviceStore -from synapse.replication.slave.storage.directory import DirectoryStore from synapse.replication.slave.storage.events import SlavedEventStore from synapse.replication.slave.storage.filtering import SlavedFilteringStore from synapse.replication.slave.storage.keys import SlavedKeyStore -from synapse.replication.slave.storage.profile import SlavedProfileStore from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore from synapse.replication.slave.storage.pushers import SlavedPusherStore -from synapse.replication.slave.storage.receipts import SlavedReceiptsStore -from synapse.replication.slave.storage.registration import SlavedRegistrationStore from synapse.rest.admin import register_servlets_for_media_repo from synapse.rest.client import ( account_data, @@ -100,8 +92,15 @@ from synapse.rest.synapse.client import build_synapse_client_resource_tree from synapse.rest.well_known import well_known_resource from synapse.server import HomeServer +from synapse.storage.databases.main.account_data import AccountDataWorkerStore +from synapse.storage.databases.main.appservice import ( + ApplicationServiceTransactionWorkerStore, + ApplicationServiceWorkerStore, +) from synapse.storage.databases.main.censor_events import CensorEventsStore from synapse.storage.databases.main.client_ips import ClientIpWorkerStore +from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore +from synapse.storage.databases.main.directory import DirectoryWorkerStore from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyStore from synapse.storage.databases.main.lock import LockStore from synapse.storage.databases.main.media_repository import MediaRepositoryStore @@ -110,11 +109,15 @@ MonthlyActiveUsersWorkerStore, ) from synapse.storage.databases.main.presence import PresenceStore +from synapse.storage.databases.main.profile import ProfileWorkerStore +from synapse.storage.databases.main.receipts import ReceiptsWorkerStore +from synapse.storage.databases.main.registration import RegistrationWorkerStore from synapse.storage.databases.main.room import RoomWorkerStore from synapse.storage.databases.main.room_batch import RoomBatchStore from synapse.storage.databases.main.search import SearchStore from synapse.storage.databases.main.session import SessionStore from synapse.storage.databases.main.stats import StatsStore +from synapse.storage.databases.main.tags import TagsWorkerStore from synapse.storage.databases.main.transactions import TransactionWorkerStore from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore from synapse.storage.databases.main.user_directory import UserDirectoryStore @@ 
-227,11 +230,11 @@ class GenericWorkerSlavedStore( UIAuthWorkerStore, EndToEndRoomKeyStore, PresenceStore, - SlavedDeviceInboxStore, + DeviceInboxWorkerStore, SlavedDeviceStore, - SlavedReceiptsStore, SlavedPushRuleStore, - SlavedAccountDataStore, + TagsWorkerStore, + AccountDataWorkerStore, SlavedPusherStore, CensorEventsStore, ClientIpWorkerStore, @@ -239,19 +242,20 @@ class GenericWorkerSlavedStore( SlavedKeyStore, RoomWorkerStore, RoomBatchStore, - DirectoryStore, - SlavedApplicationServiceStore, - SlavedRegistrationStore, - SlavedProfileStore, + DirectoryWorkerStore, + ApplicationServiceTransactionWorkerStore, + ApplicationServiceWorkerStore, + ProfileWorkerStore, SlavedFilteringStore, MonthlyActiveUsersWorkerStore, MediaRepositoryStore, ServerMetricsStore, + ReceiptsWorkerStore, + RegistrationWorkerStore, SearchStore, TransactionWorkerStore, LockStore, SessionStore, - BaseSlavedStore, ): # Properties that multiple storage classes define. Tell mypy what the # expected type is. diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index df1c21446203..0963fb3bb4fb 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -53,6 +53,18 @@ "synapse_appservice_api_sent_events", "Number of events sent to the AS", ["service"] ) +sent_ephemeral_counter = Counter( + "synapse_appservice_api_sent_ephemeral", + "Number of ephemeral events sent to the AS", + ["service"], +) + +sent_todevice_counter = Counter( + "synapse_appservice_api_sent_todevice", + "Number of todevice messages sent to the AS", + ["service"], +) + HOUR_IN_MS = 60 * 60 * 1000 @@ -310,6 +322,8 @@ async def push_bulk( ) sent_transactions_counter.labels(service.id).inc() sent_events_counter.labels(service.id).inc(len(serialized_events)) + sent_ephemeral_counter.labels(service.id).inc(len(ephemeral)) + sent_todevice_counter.labels(service.id).inc(len(to_device_messages)) return True except CodeMessageException as e: logger.warning( diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 6e11fbdb9ac9..7765c5b45417 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -86,14 +86,19 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: if email_config is None: email_config = {} + self.force_tls = email_config.get("force_tls", False) self.email_smtp_host = email_config.get("smtp_host", "localhost") - self.email_smtp_port = email_config.get("smtp_port", 25) + self.email_smtp_port = email_config.get( + "smtp_port", 465 if self.force_tls else 25 + ) self.email_smtp_user = email_config.get("smtp_user", None) self.email_smtp_pass = email_config.get("smtp_pass", None) self.require_transport_security = email_config.get( "require_transport_security", False ) self.enable_smtp_tls = email_config.get("enable_tls", True) + if self.force_tls and not self.enable_smtp_tls: + raise ConfigError("email.force_tls requires email.enable_tls to be true") if self.require_transport_security and not self.enable_smtp_tls: raise ConfigError( "email.require_transport_security requires email.enable_tls to be true" @@ -143,8 +148,7 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: if config.get("trust_identity_server_for_password_resets"): raise ConfigError( - 'The config option "trust_identity_server_for_password_resets" ' - 'has been replaced by "account_threepid_delegate". ' + 'The config option "trust_identity_server_for_password_resets" has been removed.' 
"Please consult the configuration manual at docs/usage/configuration/config_documentation.md for " "details and update your config file." ) diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py index 4fc1784efe62..5a91917b4a0e 100644 --- a/synapse/config/ratelimiting.py +++ b/synapse/config/ratelimiting.py @@ -112,6 +112,13 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: defaults={"per_second": 0.01, "burst_count": 10}, ) + # Track the rate of joins to a given room. If there are too many, temporarily + # prevent local joins and remote joins via this server. + self.rc_joins_per_room = RateLimitConfig( + config.get("rc_joins_per_room", {}), + defaults={"per_second": 1, "burst_count": 10}, + ) + # Ratelimit cross-user key requests: # * For local requests this is keyed by the sending device. # * For requests received over federation this is keyed by the origin. diff --git a/synapse/config/registration.py b/synapse/config/registration.py index fcf99be092b4..01fb0331bc70 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import argparse +import logging from typing import Any, Optional from synapse.api.constants import RoomCreationPreset @@ -20,6 +21,17 @@ from synapse.types import JsonDict, RoomAlias, UserID from synapse.util.stringutils import random_string_with_symbols, strtobool +logger = logging.getLogger(__name__) + +LEGACY_EMAIL_DELEGATE_WARNING = """\ +Delegation of email verification to an identity server is now deprecated. To +continue to allow users to add email addresses to their accounts, and use them for +password resets, configure Synapse with an SMTP server via the `email` setting, and +remove `account_threepid_delegates.email`. + +This will be an error in a future version. +""" + class RegistrationConfig(Config): section = "registration" @@ -51,6 +63,9 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: self.bcrypt_rounds = config.get("bcrypt_rounds", 12) account_threepid_delegates = config.get("account_threepid_delegates") or {} + if "email" in account_threepid_delegates: + logger.warning(LEGACY_EMAIL_DELEGATE_WARNING) + self.account_threepid_delegate_email = account_threepid_delegates.get("email") self.account_threepid_delegate_msisdn = account_threepid_delegates.get("msisdn") self.default_identity_server = config.get("default_identity_server") diff --git a/synapse/config/repository.py b/synapse/config/repository.py index 3c69dd325f9b..1033496bb43d 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -42,6 +42,18 @@ # method: %(method)s """ +# A map from the given media type to the type of thumbnail we should generate +# for it. +THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP = { + "image/jpeg": "jpeg", + "image/jpg": "jpeg", + "image/webp": "jpeg", + # Thumbnails can only be jpeg or png. We choose png thumbnails for gif + # because it can have transparency. 
+ "image/gif": "png", + "image/png": "png", +} + HTTP_PROXY_SET_WARNING = """\ The Synapse config url_preview_ip_range_blacklist will be ignored as an HTTP(s) proxy is configured.""" @@ -79,13 +91,22 @@ def parse_thumbnail_requirements( width = size["width"] height = size["height"] method = size["method"] - jpeg_thumbnail = ThumbnailRequirement(width, height, method, "image/jpeg") - png_thumbnail = ThumbnailRequirement(width, height, method, "image/png") - requirements.setdefault("image/jpeg", []).append(jpeg_thumbnail) - requirements.setdefault("image/jpg", []).append(jpeg_thumbnail) - requirements.setdefault("image/webp", []).append(jpeg_thumbnail) - requirements.setdefault("image/gif", []).append(png_thumbnail) - requirements.setdefault("image/png", []).append(png_thumbnail) + + for format, thumbnail_format in THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP.items(): + requirement = requirements.setdefault(format, []) + if thumbnail_format == "jpeg": + requirement.append( + ThumbnailRequirement(width, height, method, "image/jpeg") + ) + elif thumbnail_format == "png": + requirement.append( + ThumbnailRequirement(width, height, method, "image/png") + ) + else: + raise Exception( + "Unknown thumbnail mapping from %s to %s. This is a Synapse problem, please report!" + % (format, thumbnail_format) + ) return { media_type: tuple(thumbnails) for media_type, thumbnails in requirements.items() } diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 0fc2c4b27efe..965cb265daf8 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -740,6 +740,32 @@ def _check_power_levels( except Exception: raise SynapseError(400, "Not a valid power level: %s" % (v,)) + # Reject events with stringy power levels if required by room version + if ( + event.type == EventTypes.PowerLevels + and room_version_obj.msc3667_int_only_power_levels + ): + for k, v in event.content.items(): + if k in { + "users_default", + "events_default", + "state_default", + "ban", + "redact", + "kick", + "invite", + }: + if not isinstance(v, int): + raise SynapseError(400, f"{v!r} must be an integer.") + if k in {"events", "notifications", "users"}: + if not isinstance(v, dict) or not all( + isinstance(v, int) for v in v.values() + ): + raise SynapseError( + 400, + f"{v!r} must be a dict wherein all the values are integers.", + ) + key = (event.type, event.state_key) current_state = auth_events.get(key) diff --git a/synapse/events/builder.py b/synapse/events/builder.py index 98c203ada03b..17f624b68f58 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -24,9 +24,11 @@ RoomVersion, ) from synapse.crypto.event_signing import add_hashes_and_signatures +from synapse.event_auth import auth_types_for_event from synapse.events import EventBase, _EventInternalMetadata, make_event_from_dict from synapse.state import StateHandler from synapse.storage.databases.main import DataStore +from synapse.storage.state import StateFilter from synapse.types import EventID, JsonDict from synapse.util import Clock from synapse.util.stringutils import random_string @@ -120,8 +122,12 @@ async def build( The signed and hashed event. 
""" if auth_event_ids is None: - state_ids = await self._state.get_current_state_ids( - self.room_id, prev_event_ids + state_ids = await self._state.compute_state_after_events( + self.room_id, + prev_event_ids, + state_filter=StateFilter.from_types( + auth_types_for_event(self.room_version, self) + ), ) auth_event_ids = self._event_auth_handler.compute_auth_events( self, state_ids diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 66e6305562f5..842f5327c227 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -53,7 +53,7 @@ RoomVersion, RoomVersions, ) -from synapse.events import EventBase, builder +from synapse.events import EventBase, builder, make_event_from_dict from synapse.federation.federation_base import ( FederationBase, InvalidEventSignatureError, @@ -217,7 +217,7 @@ async def query_user_devices( ) async def claim_client_keys( - self, destination: str, content: JsonDict, timeout: int + self, destination: str, content: JsonDict, timeout: Optional[int] ) -> JsonDict: """Claims one-time keys for a device hosted on a remote server. @@ -299,7 +299,8 @@ async def get_pdu_from_destination_raw( moving to the next destination. None indicates no timeout. Returns: - The requested PDU, or None if we were unable to find it. + A copy of the requested PDU that is safe to modify, or None if we + were unable to find it. Raises: SynapseError, NotRetryingDestination, FederationDeniedError @@ -309,7 +310,7 @@ async def get_pdu_from_destination_raw( ) logger.debug( - "retrieved event id %s from %s: %r", + "get_pdu_from_destination_raw: retrieved event id %s from %s: %r", event_id, destination, transaction_data, @@ -358,54 +359,92 @@ async def get_pdu( The requested PDU, or None if we were unable to find it. """ + logger.debug( + "get_pdu: event_id=%s from destinations=%s", event_id, destinations + ) + # TODO: Rate limit the number of times we try and get the same event. - ev = self._get_pdu_cache.get(event_id) - if ev: - return ev + # We might need the same event multiple times in quick succession (before + # it gets persisted to the database), so we cache the results of the lookup. + # Note that this is separate to the regular get_event cache which caches + # events once they have been persisted. 
+ event = self._get_pdu_cache.get(event_id) + + # If we don't see the event in the cache, go try to fetch it from the + # provided remote federated destinations + if not event: + pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {}) + + for destination in destinations: + now = self._clock.time_msec() + last_attempt = pdu_attempts.get(destination, 0) + if last_attempt + PDU_RETRY_TIME_MS > now: + logger.debug( + "get_pdu: skipping destination=%s because we tried it recently last_attempt=%s and we only check every %s (now=%s)", + destination, + last_attempt, + PDU_RETRY_TIME_MS, + now, + ) + continue + + try: + event = await self.get_pdu_from_destination_raw( + destination=destination, + event_id=event_id, + room_version=room_version, + timeout=timeout, + ) - pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {}) + pdu_attempts[destination] = now - signed_pdu = None - for destination in destinations: - now = self._clock.time_msec() - last_attempt = pdu_attempts.get(destination, 0) - if last_attempt + PDU_RETRY_TIME_MS > now: - continue + if event: + # Prime the cache + self._get_pdu_cache[event.event_id] = event - try: - signed_pdu = await self.get_pdu_from_destination_raw( - destination=destination, - event_id=event_id, - room_version=room_version, - timeout=timeout, - ) + # FIXME: We should add a `break` here to avoid calling every + # destination after we already found a PDU (will follow-up + # in a separate PR) - pdu_attempts[destination] = now - - except SynapseError as e: - logger.info( - "Failed to get PDU %s from %s because %s", event_id, destination, e - ) - continue - except NotRetryingDestination as e: - logger.info(str(e)) - continue - except FederationDeniedError as e: - logger.info(str(e)) - continue - except Exception as e: - pdu_attempts[destination] = now + except SynapseError as e: + logger.info( + "Failed to get PDU %s from %s because %s", + event_id, + destination, + e, + ) + continue + except NotRetryingDestination as e: + logger.info(str(e)) + continue + except FederationDeniedError as e: + logger.info(str(e)) + continue + except Exception as e: + pdu_attempts[destination] = now + + logger.info( + "Failed to get PDU %s from %s because %s", + event_id, + destination, + e, + ) + continue - logger.info( - "Failed to get PDU %s from %s because %s", event_id, destination, e - ) - continue + if not event: + return None - if signed_pdu: - self._get_pdu_cache[event_id] = signed_pdu + # `event` now refers to an object stored in `get_pdu_cache`. Our + # callers may need to modify the returned object (eg to set + # `event.internal_metadata.outlier = true`), so we return a copy + # rather than the original object. 
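As a rough orientation for the rewritten `get_pdu` flow, a hypothetical sketch of the same pattern follows: consult a shared cache, skip destinations retried too recently, prime the cache on success, and hand callers a defensive copy. None of the names below (`fetch_event`, `RETRY_WINDOW_S`, the `fetch` callable) are Synapse's; they are assumptions for the example.

```python
import copy
import time
from typing import Any, Callable, Dict, Iterable, Optional

RETRY_WINDOW_S = 60.0  # illustrative stand-in for PDU_RETRY_TIME_MS

_cache: Dict[str, Dict[str, Any]] = {}           # event_id -> fetched event (shared object)
_last_attempt: Dict[str, Dict[str, float]] = {}  # event_id -> destination -> last attempt time


def fetch_event(
    event_id: str,
    destinations: Iterable[str],
    fetch: Callable[[str, str], Optional[Dict[str, Any]]],
) -> Optional[Dict[str, Any]]:
    """Check the cache, try each destination with per-destination backoff, return a copy."""
    event = _cache.get(event_id)
    if event is None:
        attempts = _last_attempt.setdefault(event_id, {})
        for destination in destinations:
            now = time.monotonic()
            if now - attempts.get(destination, float("-inf")) < RETRY_WINDOW_S:
                continue  # tried this destination too recently: skip it for now
            attempts[destination] = now
            event = fetch(destination, event_id)
            if event is not None:
                _cache[event_id] = event  # prime the cache for subsequent lookups
                break
    if event is None:
        return None
    # Hand back a copy so callers may mutate the result without poisoning the cache.
    return copy.deepcopy(event)
```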
+ event_copy = make_event_from_dict( + event.get_pdu_json(), + event.room_version, + ) - return signed_pdu + return event_copy async def get_room_state_ids( self, destination: str, room_id: str, event_id: str diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 5dfdc86740d1..ae550d3f4de6 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -118,6 +118,7 @@ def __init__(self, hs: "HomeServer"): self._federation_event_handler = hs.get_federation_event_handler() self.state = hs.get_state_handler() self._event_auth_handler = hs.get_event_auth_handler() + self._room_member_handler = hs.get_room_member_handler() self._state_storage_controller = hs.get_storage_controllers().state @@ -621,6 +622,15 @@ async def on_make_join_request( ) raise IncompatibleRoomVersionError(room_version=room_version) + # Refuse the request if that room has seen too many joins recently. + # This is in addition to the HS-level rate limiting applied by + # BaseFederationServlet. + # type-ignore: mypy doesn't seem able to deduce the type of the limiter(!?) + await self._room_member_handler._join_rate_per_room_limiter.ratelimit( # type: ignore[has-type] + requester=None, + key=room_id, + update=False, + ) pdu = await self.handler.on_make_join_request(origin, room_id, user_id) return {"event": pdu.get_templated_pdu_json(), "room_version": room_version} @@ -655,6 +665,12 @@ async def on_send_join_request( room_id: str, caller_supports_partial_state: bool = False, ) -> Dict[str, Any]: + await self._room_member_handler._join_rate_per_room_limiter.ratelimit( # type: ignore[has-type] + requester=None, + key=room_id, + update=False, + ) + event, context = await self._on_send_membership_event( origin, content, Membership.JOIN, room_id ) diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 99a794c04288..94a65ac65fd9 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -351,7 +351,11 @@ async def _process_event_queue_loop(self) -> None: self._is_processing = True while True: last_token = await self.store.get_federation_out_pos("events") - next_token, events = await self.store.get_all_new_events_stream( + ( + next_token, + events, + event_to_received_ts, + ) = await self.store.get_all_new_events_stream( last_token, self._last_poked_id, limit=100 ) @@ -476,7 +480,7 @@ async def handle_event(event: EventBase) -> None: await self._send_pdu(event, sharded_destinations) now = self.clock.time_msec() - ts = await self.store.get_received_ts(event.event_id) + ts = event_to_received_ts[event.event_id] assert ts is not None synapse.metrics.event_processing_lag_by_event.labels( "federation_sender" @@ -509,7 +513,7 @@ async def handle_room_events(events: List[EventBase]) -> None: if events: now = self.clock.time_msec() - ts = await self.store.get_received_ts(events[-1].event_id) + ts = event_to_received_ts[events[-1].event_id] assert ts is not None synapse.metrics.event_processing_lag.labels( diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 9e84bd677e98..32074b8ca690 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -619,7 +619,7 @@ async def query_user_devices( ) async def claim_client_keys( - self, destination: str, query_content: JsonDict, timeout: int + self, destination: str, query_content: JsonDict, timeout: Optional[int] ) -> JsonDict: """Claim one-time keys 
for a list of devices hosted on a remote server. diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py index 84100a5a5257..bb0f8d6b7b55 100644 --- a/synapse/federation/transport/server/_base.py +++ b/synapse/federation/transport/server/_base.py @@ -309,7 +309,7 @@ async def new_func( raise # update the active opentracing span with the authenticated entity - set_tag("authenticated_entity", origin) + set_tag("authenticated_entity", str(origin)) # if the origin is authenticated and whitelisted, use its span context # as the parent. diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 814553e0988d..203b62e0151b 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -104,14 +104,15 @@ async def _notify_interested_services(self, max_token: RoomStreamToken) -> None: with Measure(self.clock, "notify_interested_services"): self.is_processing = True try: - limit = 100 upper_bound = -1 while upper_bound < self.current_max: + last_token = await self.store.get_appservice_last_pos() ( upper_bound, events, - ) = await self.store.get_new_events_for_appservice( - self.current_max, limit + event_to_received_ts, + ) = await self.store.get_all_new_events_stream( + last_token, self.current_max, limit=100, get_prev_content=True ) events_by_room: Dict[str, List[EventBase]] = {} @@ -150,7 +151,7 @@ async def start_scheduler() -> None: ) now = self.clock.time_msec() - ts = await self.store.get_received_ts(event.event_id) + ts = event_to_received_ts[event.event_id] assert ts is not None synapse.metrics.event_processing_lag_by_event.labels( @@ -187,7 +188,7 @@ async def handle_room_events(events: Iterable[EventBase]) -> None: if events: now = self.clock.time_msec() - ts = await self.store.get_received_ts(events[-1].event_id) + ts = event_to_received_ts[events[-1].event_id] assert ts is not None synapse.metrics.event_processing_lag.labels( diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index c05a170c5531..1a8379854ce1 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -118,8 +118,8 @@ async def get_device(self, user_id: str, device_id: str) -> JsonDict: ips = await self.store.get_last_client_ip_by_device(user_id, device_id) _update_device_from_client_ips(device, ips) - set_tag("device", device) - set_tag("ips", ips) + set_tag("device", str(device)) + set_tag("ips", str(ips)) return device @@ -170,7 +170,7 @@ async def get_user_ids_changed( """ set_tag("user_id", user_id) - set_tag("from_token", from_token) + set_tag("from_token", str(from_token)) now_room_key = self.store.get_room_max_token() room_ids = await self.store.get_rooms_for_user(user_id) @@ -795,7 +795,7 @@ async def incoming_device_list_update( """ set_tag("origin", origin) - set_tag("edu_content", edu_content) + set_tag("edu_content", str(edu_content)) user_id = edu_content.pop("user_id") device_id = edu_content.pop("device_id") stream_id = str(edu_content.pop("stream_id")) # They may come as ints diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 52bb5c9c55ab..c938339ddd89 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -15,7 +15,7 @@ # limitations under the License. 
import logging -from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Mapping, Optional, Tuple import attr from canonicaljson import encode_canonical_json @@ -92,7 +92,11 @@ def __init__(self, hs: "HomeServer"): @trace async def query_devices( - self, query_body: JsonDict, timeout: int, from_user_id: str, from_device_id: str + self, + query_body: JsonDict, + timeout: int, + from_user_id: str, + from_device_id: Optional[str], ) -> JsonDict: """Handle a device key query from a client @@ -120,9 +124,7 @@ async def query_devices( the number of in-flight queries at a time. """ async with self._query_devices_linearizer.queue((from_user_id, from_device_id)): - device_keys_query: Dict[str, Iterable[str]] = query_body.get( - "device_keys", {} - ) + device_keys_query: Dict[str, List[str]] = query_body.get("device_keys", {}) # separate users by domain. # make a map from domain to user_id to device_ids @@ -136,8 +138,8 @@ async def query_devices( else: remote_queries[user_id] = device_ids - set_tag("local_key_query", local_query) - set_tag("remote_key_query", remote_queries) + set_tag("local_key_query", str(local_query)) + set_tag("remote_key_query", str(remote_queries)) # First get local devices. # A map of destination -> failure response. @@ -341,7 +343,7 @@ async def _query_devices_for_destination( failure = _exception_to_failure(e) failures[destination] = failure set_tag("error", True) - set_tag("reason", failure) + set_tag("reason", str(failure)) return @@ -392,7 +394,7 @@ async def get_cross_signing_keys_from_cache( @trace async def query_local_devices( - self, query: Dict[str, Optional[List[str]]] + self, query: Mapping[str, Optional[List[str]]] ) -> Dict[str, Dict[str, dict]]: """Get E2E device keys for local users @@ -403,7 +405,7 @@ async def query_local_devices( Returns: A map from user_id -> device_id -> device details """ - set_tag("local_query", query) + set_tag("local_query", str(query)) local_query: List[Tuple[str, Optional[str]]] = [] result_dict: Dict[str, Dict[str, dict]] = {} @@ -461,7 +463,7 @@ async def on_federation_query_client_keys( @trace async def claim_one_time_keys( - self, query: Dict[str, Dict[str, Dict[str, str]]], timeout: int + self, query: Dict[str, Dict[str, Dict[str, str]]], timeout: Optional[int] ) -> JsonDict: local_query: List[Tuple[str, str, str]] = [] remote_queries: Dict[str, Dict[str, Dict[str, str]]] = {} @@ -475,8 +477,8 @@ async def claim_one_time_keys( domain = get_domain_from_id(user_id) remote_queries.setdefault(domain, {})[user_id] = one_time_keys - set_tag("local_key_query", local_query) - set_tag("remote_key_query", remote_queries) + set_tag("local_key_query", str(local_query)) + set_tag("remote_key_query", str(remote_queries)) results = await self.store.claim_e2e_one_time_keys(local_query) @@ -506,7 +508,7 @@ async def claim_client_keys(destination: str) -> None: failure = _exception_to_failure(e) failures[destination] = failure set_tag("error", True) - set_tag("reason", failure) + set_tag("reason", str(failure)) await make_deferred_yieldable( defer.gatherResults( @@ -609,7 +611,7 @@ async def upload_keys_for_user( result = await self.store.count_e2e_one_time_keys(user_id, device_id) - set_tag("one_time_key_counts", result) + set_tag("one_time_key_counts", str(result)) return {"one_time_key_counts": result} async def _upload_one_time_keys_for_user( diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 446f509bdc3b..28dc08c22a36 
100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -14,7 +14,7 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING, Dict, Optional +from typing import TYPE_CHECKING, Dict, Optional, cast from typing_extensions import Literal @@ -97,7 +97,7 @@ async def get_room_keys( user_id, version, room_id, session_id ) - log_kv(results) + log_kv(cast(JsonDict, results)) return results @trace diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index c74117c19aae..16f20c8be7bc 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import collections import itertools import logging from http import HTTPStatus @@ -347,7 +348,7 @@ async def on_send_membership_event( event.internal_metadata.send_on_behalf_of = origin context = await self._state_handler.compute_event_context(event) - context = await self._check_event_auth(origin, event, context) + await self._check_event_auth(origin, event, context) if context.rejected: raise SynapseError( 403, f"{event.membership} event was rejected", Codes.FORBIDDEN @@ -485,7 +486,7 @@ async def process_remote_join( partial_state=partial_state, ) - context = await self._check_event_auth(origin, event, context) + await self._check_event_auth(origin, event, context) if context.rejected: raise SynapseError(400, "Join event was rejected") @@ -765,10 +766,24 @@ async def _process_pulled_event( """ logger.info("Processing pulled event %s", event) - # these should not be outliers. - assert ( - not event.internal_metadata.is_outlier() - ), "pulled event unexpectedly flagged as outlier" + # This function should not be used to persist outliers (use something + # else) because this does a bunch of operations that aren't necessary + # (extra work; in particular, it makes sure we have all the prev_events + # and resolves the state across those prev events). If you happen to run + # into a situation where the event you're trying to process/backfill is + # marked as an `outlier`, then you should update that spot to return an + # `EventBase` copy that doesn't have `outlier` flag set. + # + # `EventBase` is used to represent both an event we have not yet + # persisted, and one that we have persisted and now keep in the cache. + # In an ideal world this method would only be called with the first type + # of event, but it turns out that's not actually the case and for + # example, you could get an event from cache that is marked as an + # `outlier` (fix up that spot though). + assert not event.internal_metadata.is_outlier(), ( + "Outlier event passed to _process_pulled_event. " + "To persist an event as a non-outlier, make sure to pass in a copy without `event.internal_metadata.outlier = true`." + ) event_id = event.event_id @@ -778,7 +793,7 @@ async def _process_pulled_event( if existing: if not existing.internal_metadata.is_outlier(): logger.info( - "Ignoring received event %s which we have already seen", + "_process_pulled_event: Ignoring received event %s which we have already seen", event_id, ) return @@ -1036,6 +1051,9 @@ async def _get_state_ids_after_missing_prev_event( # XXX: this doesn't sound right? it means that we'll end up with incomplete # state. failed_to_fetch = desired_events - event_metadata.keys() + # `event_id` could be missing from `event_metadata` because it's not necessarily + # a state event. 
We've already checked that we've fetched it above. + failed_to_fetch.discard(event_id) if failed_to_fetch: logger.warning( "Failed to fetch missing state events for %s %s", @@ -1116,11 +1134,7 @@ async def _process_received_pdu( state_ids_before_event=state_ids, ) try: - context = await self._check_event_auth( - origin, - event, - context, - ) + await self._check_event_auth(origin, event, context) except AuthError as e: # This happens only if we couldn't find the auth events. We'll already have # logged a warning, so now we just convert to a FederationError. @@ -1315,6 +1329,53 @@ async def _handle_marker_event(self, origin: str, marker_event: EventBase) -> No marker_event, ) + async def backfill_event_id( + self, destination: str, room_id: str, event_id: str + ) -> EventBase: + """Backfill a single event and persist it as a non-outlier which means + we also pull in all of the state and auth events necessary for it. + + Args: + destination: The homeserver to pull the given event_id from. + room_id: The room where the event is from. + event_id: The event ID to backfill. + + Raises: + FederationError if we are unable to find the event from the destination + """ + logger.info( + "backfill_event_id: event_id=%s from destination=%s", event_id, destination + ) + + room_version = await self._store.get_room_version(room_id) + + event_from_response = await self._federation_client.get_pdu( + [destination], + event_id, + room_version, + ) + + if not event_from_response: + raise FederationError( + "ERROR", + 404, + "Unable to find event_id=%s from destination=%s to backfill." + % (event_id, destination), + affected=event_id, + ) + + # Persist the event we just fetched, including pulling all of the state + # and auth events to de-outlier it. This also sets up the necessary + # `state_groups` for the event. + await self._process_pulled_events( + destination, + [event_from_response], + # Prevent notifications going to clients + backfilled=True, + ) + + return event_from_response + async def _get_events_and_persist( self, destination: str, room_id: str, event_ids: Collection[str] ) -> None: @@ -1495,11 +1556,8 @@ async def prep(event: EventBase) -> None: ) async def _check_event_auth( - self, - origin: str, - event: EventBase, - context: EventContext, - ) -> EventContext: + self, origin: str, event: EventBase, context: EventContext + ) -> None: """ Checks whether an event should be rejected (for failing auth checks). @@ -1509,9 +1567,6 @@ async def _check_event_auth( context: The event context. - Returns: - The updated context object. - Raises: AuthError if we were unable to find copies of the event's auth events. (Most other failures just cause us to set `context.rejected`.) @@ -1526,7 +1581,7 @@ async def _check_event_auth( logger.warning("While validating received event %r: %s", event, e) # TODO: use a different rejected reason here? context.rejected = RejectedReason.AUTH_ERROR - return context + return # next, check that we have all of the event's auth events. # @@ -1538,6 +1593,9 @@ async def _check_event_auth( ) # ... and check that the event passes auth at those auth events. + # https://spec.matrix.org/v1.3/server-server-api/#checks-performed-on-receipt-of-a-pdu: + # 4. Passes authorization rules based on the event’s auth events, + # otherwise it is rejected. 
try: await check_state_independent_auth_rules(self._store, event) check_state_dependent_auth_rules(event, claimed_auth_events) @@ -1546,55 +1604,90 @@ async def _check_event_auth( "While checking auth of %r against auth_events: %s", event, e ) context.rejected = RejectedReason.AUTH_ERROR - return context + return - # now check auth against what we think the auth events *should* be. - event_types = event_auth.auth_types_for_event(event.room_version, event) - prev_state_ids = await context.get_prev_state_ids( - StateFilter.from_types(event_types) + # now check the auth rules pass against the room state before the event + # https://spec.matrix.org/v1.3/server-server-api/#checks-performed-on-receipt-of-a-pdu: + # 5. Passes authorization rules based on the state before the event, + # otherwise it is rejected. + # + # ... however, if we only have partial state for the room, then there is a good + # chance that we'll be missing some of the state needed to auth the new event. + # So, we state-resolve the auth events that we are given against the state that + # we know about, which ensures things like bans are applied. (Note that we'll + # already have checked we have all the auth events, in + # _load_or_fetch_auth_events_for_event above) + if context.partial_state: + room_version = await self._store.get_room_version_id(event.room_id) + + local_state_id_map = await context.get_prev_state_ids() + claimed_auth_events_id_map = { + (ev.type, ev.state_key): ev.event_id for ev in claimed_auth_events + } + + state_for_auth_id_map = ( + await self._state_resolution_handler.resolve_events_with_store( + event.room_id, + room_version, + [local_state_id_map, claimed_auth_events_id_map], + event_map=None, + state_res_store=StateResolutionStore(self._store), + ) + ) + else: + event_types = event_auth.auth_types_for_event(event.room_version, event) + state_for_auth_id_map = await context.get_prev_state_ids( + StateFilter.from_types(event_types) + ) + + calculated_auth_event_ids = self._event_auth_handler.compute_auth_events( + event, state_for_auth_id_map, for_verification=True ) - auth_events_ids = self._event_auth_handler.compute_auth_events( - event, prev_state_ids, for_verification=True + # if those are the same, we're done here. + if collections.Counter(event.auth_event_ids()) == collections.Counter( + calculated_auth_event_ids + ): + return + + # otherwise, re-run the auth checks based on what we calculated. + calculated_auth_events = await self._store.get_events_as_list( + calculated_auth_event_ids ) - auth_events_x = await self._store.get_events(auth_events_ids) + + # log the differences + + claimed_auth_event_map = {(e.type, e.state_key): e for e in claimed_auth_events} calculated_auth_event_map = { - (e.type, e.state_key): e for e in auth_events_x.values() + (e.type, e.state_key): e for e in calculated_auth_events } + logger.info( + "event's auth_events are different to our calculated auth_events. " + "Claimed but not calculated: %s. 
Calculated but not claimed: %s", + [ + ev + for k, ev in claimed_auth_event_map.items() + if k not in calculated_auth_event_map + or calculated_auth_event_map[k].event_id != ev.event_id + ], + [ + ev + for k, ev in calculated_auth_event_map.items() + if k not in claimed_auth_event_map + or claimed_auth_event_map[k].event_id != ev.event_id + ], + ) try: - updated_auth_events = await self._update_auth_events_for_auth( + check_state_dependent_auth_rules(event, calculated_auth_events) + except AuthError as e: + logger.warning( + "While checking auth of %r against room state before the event: %s", event, - calculated_auth_event_map=calculated_auth_event_map, - ) - except Exception: - # We don't really mind if the above fails, so lets not fail - # processing if it does. However, it really shouldn't fail so - # let's still log as an exception since we'll still want to fix - # any bugs. - logger.exception( - "Failed to double check auth events for %s with remote. " - "Ignoring failure and continuing processing of event.", - event.event_id, - ) - updated_auth_events = None - - if updated_auth_events: - context = await self._update_context_for_auth_events( - event, context, updated_auth_events + e, ) - auth_events_for_auth = updated_auth_events - else: - auth_events_for_auth = calculated_auth_event_map - - try: - check_state_dependent_auth_rules(event, auth_events_for_auth.values()) - except AuthError as e: - logger.warning("Failed auth resolution for %r because %s", event, e) context.rejected = RejectedReason.AUTH_ERROR - return context - async def _maybe_kick_guest_users(self, event: EventBase) -> None: if event.type != EventTypes.GuestAccess: return @@ -1618,11 +1711,21 @@ async def _check_for_soft_fail( """Checks if we should soft fail the event; if so, marks the event as such. + Does nothing for events in rooms with partial state, since we may not have an + accurate membership event for the sender in the current state. + Args: event state_ids: The state at the event if we don't have all the event's prev events origin: The host the event originates from. """ + if await self._store.is_partial_state_room(event.room_id): + # We might not know the sender's membership in the current state, so don't + # soft fail anything. Even if we do have a membership for the sender in the + # current state, it may have been derived from state resolution between + # partial and full state and may not be accurate. + return + extrem_ids_list = await self._store.get_latest_event_ids_in_room(event.room_id) extrem_ids = set(extrem_ids_list) prev_event_ids = set(event.prev_event_ids()) @@ -1704,93 +1807,6 @@ async def _check_for_soft_fail( soft_failed_event_counter.inc() event.internal_metadata.soft_failed = True - async def _update_auth_events_for_auth( - self, - event: EventBase, - calculated_auth_event_map: StateMap[EventBase], - ) -> Optional[StateMap[EventBase]]: - """Helper for _check_event_auth. See there for docs. - - Checks whether a given event has the expected auth events. If it - doesn't then we talk to the remote server to compare state to see if - we can come to a consensus (e.g. if one server missed some valid - state). - - This attempts to resolve any potential divergence of state between - servers, but is not essential and so failures should not block further - processing of the event. - - Args: - event: - - calculated_auth_event_map: - Our calculated auth_events based on the state of the room - at the event's position in the DAG. - - Returns: - updated auth event map, or None if no changes are needed. 
- - """ - assert not event.internal_metadata.outlier - - # check for events which are in the event's claimed auth_events, but not - # in our calculated event map. - event_auth_events = set(event.auth_event_ids()) - different_auth = event_auth_events.difference( - e.event_id for e in calculated_auth_event_map.values() - ) - - if not different_auth: - return None - - logger.info( - "auth_events refers to events which are not in our calculated auth " - "chain: %s", - different_auth, - ) - - # XXX: currently this checks for redactions but I'm not convinced that is - # necessary? - different_events = await self._store.get_events_as_list(different_auth) - - # double-check they're all in the same room - we should already have checked - # this but it doesn't hurt to check again. - for d in different_events: - assert ( - d.room_id == event.room_id - ), f"Event {event.event_id} refers to auth_event {d.event_id} which is in a different room" - - # now we state-resolve between our own idea of the auth events, and the remote's - # idea of them. - - local_state = calculated_auth_event_map.values() - remote_auth_events = dict(calculated_auth_event_map) - remote_auth_events.update({(d.type, d.state_key): d for d in different_events}) - remote_state = remote_auth_events.values() - - room_version = await self._store.get_room_version_id(event.room_id) - new_state = await self._state_handler.resolve_events( - room_version, (local_state, remote_state), event - ) - different_state = { - (d.type, d.state_key): d - for d in new_state.values() - if calculated_auth_event_map.get((d.type, d.state_key)) != d - } - if not different_state: - logger.info("State res returned no new state") - return None - - logger.info( - "After state res: updating auth_events with new state %s", - different_state.values(), - ) - - # take a copy of calculated_auth_event_map before we modify it. - auth_events = dict(calculated_auth_event_map) - auth_events.update(different_state) - return auth_events - async def _load_or_fetch_auth_events_for_event( self, destination: str, event: EventBase ) -> Collection[EventBase]: @@ -1888,61 +1904,6 @@ async def _get_remote_auth_chain_for_event( await self._auth_and_persist_outliers(room_id, remote_auth_events) - async def _update_context_for_auth_events( - self, event: EventBase, context: EventContext, auth_events: StateMap[EventBase] - ) -> EventContext: - """Update the state_ids in an event context after auth event resolution, - storing the changes as a new state group. - - Args: - event: The event we're handling the context for - - context: initial event context - - auth_events: Events to update in the event context. - - Returns: - new event context - """ - # exclude the state key of the new event from the current_state in the context. - if event.is_state(): - event_key: Optional[Tuple[str, str]] = (event.type, event.state_key) - else: - event_key = None - state_updates = { - k: a.event_id for k, a in auth_events.items() if k != event_key - } - - current_state_ids = await context.get_current_state_ids() - current_state_ids = dict(current_state_ids) # type: ignore - - current_state_ids.update(state_updates) - - prev_state_ids = await context.get_prev_state_ids() - prev_state_ids = dict(prev_state_ids) - - prev_state_ids.update({k: a.event_id for k, a in auth_events.items()}) - - # create a new state group as a delta from the existing one. 
- prev_group = context.state_group - state_group = await self._state_storage_controller.store_state_group( - event.event_id, - event.room_id, - prev_group=prev_group, - delta_ids=state_updates, - current_state_ids=current_state_ids, - ) - - return EventContext.with_state( - storage=self._storage_controllers, - state_group=state_group, - state_group_before_event=context.state_group_before_event, - state_delta_due_to_event=state_updates, - prev_group=prev_group, - delta_ids=state_updates, - partial_state=context.partial_state, - ) - async def _run_push_actions_and_persist_event( self, event: EventBase, context: EventContext, backfilled: bool = False ) -> None: @@ -2093,6 +2054,10 @@ async def _notify_persisted_event( event, event_pos, max_stream_token, extra_users=extra_users ) + if event.type == EventTypes.Member and event.membership == Membership.JOIN: + # TODO retrieve the previous state, and exclude join -> join transitions + self._notifier.notify_user_joined_room(event.event_id, event.room_id) + def _sanity_check_event(self, ev: EventBase) -> None: """ Do some early sanity checks of a received event diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 9bca2bc4b24e..e5afe84df9fb 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -163,8 +163,7 @@ async def bind_threepid( sid: str, mxid: str, id_server: str, - id_access_token: Optional[str] = None, - use_v2: bool = True, + id_access_token: str, ) -> JsonDict: """Bind a 3PID to an identity server @@ -174,8 +173,7 @@ async def bind_threepid( mxid: The MXID to bind the 3PID to id_server: The domain of the identity server to query id_access_token: The access token to authenticate to the identity - server with, if necessary. Required if use_v2 is true - use_v2: Whether to use v2 Identity Service API endpoints. Defaults to True + server with Raises: SynapseError: On any of the following conditions @@ -187,24 +185,15 @@ async def bind_threepid( """ logger.debug("Proxying threepid bind request for %s to %s", mxid, id_server) - # If an id_access_token is not supplied, force usage of v1 - if id_access_token is None: - use_v2 = False - if not valid_id_server_location(id_server): raise SynapseError( 400, "id_server must be a valid hostname with optional port and path components", ) - # Decide which API endpoint URLs to use - headers = {} bind_data = {"sid": sid, "client_secret": client_secret, "mxid": mxid} - if use_v2: - bind_url = "https://%s/_matrix/identity/v2/3pid/bind" % (id_server,) - headers["Authorization"] = create_id_access_token_header(id_access_token) # type: ignore - else: - bind_url = "https://%s/_matrix/identity/api/v1/3pid/bind" % (id_server,) + bind_url = "https://%s/_matrix/identity/v2/3pid/bind" % (id_server,) + headers = {"Authorization": create_id_access_token_header(id_access_token)} try: # Use the blacklisting http client as this call is only to identity servers @@ -223,21 +212,14 @@ async def bind_threepid( return data except HttpResponseException as e: - if e.code != 404 or not use_v2: - logger.error("3PID bind failed with Matrix error: %r", e) - raise e.to_synapse_error() + logger.error("3PID bind failed with Matrix error: %r", e) + raise e.to_synapse_error() except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") except CodeMessageException as e: data = json_decoder.decode(e.msg) # XXX WAT? 
return data - logger.info("Got 404 when POSTing JSON %s, falling back to v1 URL", bind_url) - res = await self.bind_threepid( - client_secret, sid, mxid, id_server, id_access_token, use_v2=False - ) - return res - async def try_unbind_threepid(self, mxid: str, threepid: dict) -> bool: """Attempt to remove a 3PID from an identity server, or if one is not provided, all identity servers we're aware the binding is present on @@ -300,8 +282,8 @@ async def try_unbind_threepid_with_id_server( "id_server must be a valid hostname with optional port and path components", ) - url = "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,) - url_bytes = b"/_matrix/identity/api/v1/3pid/unbind" + url = "https://%s/_matrix/identity/v2/3pid/unbind" % (id_server,) + url_bytes = b"/_matrix/identity/v2/3pid/unbind" content = { "mxid": mxid, @@ -434,7 +416,7 @@ async def send_threepid_validation( return session_id - async def requestEmailToken( + async def request_email_token( self, id_server: str, email: str, diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 1980e37daeca..bd7baef0510b 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -463,6 +463,7 @@ def __init__(self, hs: "HomeServer"): ) self._events_shard_config = self.config.worker.events_shard_config self._instance_name = hs.get_instance_name() + self._notifier = hs.get_notifier() self.room_prejoin_state_types = self.hs.config.api.room_prejoin_state @@ -1444,7 +1445,12 @@ async def cache_joined_hosts_for_event( if state_entry.state_group in self._external_cache_joined_hosts_updates: return - joined_hosts = await self.store.get_joined_hosts(event.room_id, state_entry) + state = await state_entry.get_state( + self._storage_controllers.state, StateFilter.all() + ) + joined_hosts = await self.store.get_joined_hosts( + event.room_id, state, state_entry + ) # Note that the expiry times must be larger than the expiry time in # _external_cache_joined_hosts_updates. @@ -1545,6 +1551,16 @@ async def persist_and_notify_client_event( requester, is_admin_redaction=is_admin_redaction ) + if event.type == EventTypes.Member and event.membership == Membership.JOIN: + ( + current_membership, + _, + ) = await self.store.get_local_current_membership_for_user_in_room( + event.state_key, event.room_id + ) + if current_membership != Membership.JOIN: + self._notifier.notify_user_joined_room(event.event_id, event.room_id) + await self._maybe_kick_guest_users(event, context) if event.type == EventTypes.CanonicalAlias: @@ -1844,13 +1860,8 @@ async def _send_dummy_event_for_room(self, room_id: str) -> bool: # For each room we need to find a joined member we can use to send # the dummy event with. 
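The `persist_and_notify_client_event` change earlier in this hunk only fires the join notification when the sender's previous local membership was not already `join`, so profile-only membership updates do not count as joins. A small self-contained sketch of that transition check (the constant and function below are stand-ins, not Synapse code):

```python
from typing import Optional

# "join" stands in for synapse.api.constants.Membership.JOIN.
JOIN = "join"

def is_genuine_join(new_membership: str, previous_membership: Optional[str]) -> bool:
    """True only for transitions *into* the joined state (invite -> join,
    leave -> join, or no prior membership at all); join -> join events such as
    display name or avatar updates do not count."""
    return new_membership == JOIN and previous_membership != JOIN

assert is_genuine_join("join", None)
assert is_genuine_join("join", "invite")
assert not is_genuine_join("join", "join")  # profile change, not a new join
```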
- latest_event_ids = await self.store.get_prev_events_for_room(room_id) - members = await self.state.get_current_users_in_room( - room_id, latest_event_ids=latest_event_ids - ) + members = await self.store.get_local_users_in_room(room_id) for user_id in members: - if not self.hs.is_mine_id(user_id): - continue requester = create_requester(user_id, authenticated_entity=self.server_name) try: event, context = await self.create_event( @@ -1861,7 +1872,6 @@ async def _send_dummy_event_for_room(self, room_id: str) -> bool: "room_id": room_id, "sender": user_id, }, - prev_event_ids=latest_event_ids, ) event.internal_metadata.proactively_send = False diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 895ea63ed33b..741504ba9f26 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -34,7 +34,6 @@ Callable, Collection, Dict, - FrozenSet, Generator, Iterable, List, @@ -42,7 +41,6 @@ Set, Tuple, Type, - Union, ) from prometheus_client import Counter @@ -68,7 +66,6 @@ from synapse.streams import EventSource from synapse.types import JsonDict, StreamKeyType, UserID, get_domain_from_id from synapse.util.async_helpers import Linearizer -from synapse.util.caches.descriptors import _CacheContext, cached from synapse.util.metrics import Measure from synapse.util.wheel_timer import WheelTimer @@ -1656,15 +1653,18 @@ async def get_new_events( # doesn't return. C.f. #5503. return [], max_token - # Figure out which other users this user should receive updates for - users_interested_in = await self._get_interested_in(user, explicit_room_id) + # Figure out which other users this user should explicitly receive + # updates for + additional_users_interested_in = ( + await self.get_presence_router().get_interested_users(user.to_string()) + ) # We have a set of users that we're interested in the presence of. We want to # cross-reference that with the users that have actually changed their presence. # Check whether this user should see all user updates - if users_interested_in == PresenceRouter.ALL_USERS: + if additional_users_interested_in == PresenceRouter.ALL_USERS: # Provide presence state for all users presence_updates = await self._filter_all_presence_updates_for_user( user_id, include_offline, from_key @@ -1673,34 +1673,47 @@ async def get_new_events( return presence_updates, max_token # Make mypy happy. users_interested_in should now be a set - assert not isinstance(users_interested_in, str) + assert not isinstance(additional_users_interested_in, str) + + # We always care about our own presence. + additional_users_interested_in.add(user_id) + + if explicit_room_id: + user_ids = await self.store.get_users_in_room(explicit_room_id) + additional_users_interested_in.update(user_ids) # The set of users that we're interested in and that have had a presence update. # We'll actually pull the presence updates for these users at the end. - interested_and_updated_users: Union[Set[str], FrozenSet[str]] = set() + interested_and_updated_users: Collection[str] if from_key is not None: # First get all users that have had a presence update updated_users = stream_change_cache.get_all_entities_changed(from_key) # Cross-reference users we're interested in with those that have had updates. - # Use a slightly-optimised method for processing smaller sets of updates. 
- if updated_users is not None and len(updated_users) < 500: - # For small deltas, it's quicker to get all changes and then - # cross-reference with the users we're interested in + if updated_users is not None: + # If we have the full list of changes for presence we can + # simply check which ones share a room with the user. get_updates_counter.labels("stream").inc() - for other_user_id in updated_users: - if other_user_id in users_interested_in: - # mypy thinks this variable could be a FrozenSet as it's possibly set - # to one in the `get_entities_changed` call below, and `add()` is not - # method on a FrozenSet. That doesn't affect us here though, as - # `interested_and_updated_users` is clearly a set() above. - interested_and_updated_users.add(other_user_id) # type: ignore + + sharing_users = await self.store.do_users_share_a_room( + user_id, updated_users + ) + + interested_and_updated_users = ( + sharing_users.union(additional_users_interested_in) + ).intersection(updated_users) + else: # Too many possible updates. Find all users we can see and check # if any of them have changed. get_updates_counter.labels("full").inc() + users_interested_in = ( + await self.store.get_users_who_share_room_with_user(user_id) + ) + users_interested_in.update(additional_users_interested_in) + interested_and_updated_users = ( stream_change_cache.get_entities_changed( users_interested_in, from_key @@ -1709,7 +1722,10 @@ async def get_new_events( else: # No from_key has been specified. Return the presence for all users # this user is interested in - interested_and_updated_users = users_interested_in + interested_and_updated_users = ( + await self.store.get_users_who_share_room_with_user(user_id) + ) + interested_and_updated_users.update(additional_users_interested_in) # Retrieve the current presence state for each user users_to_state = await self.get_presence_handler().current_state_for_users( @@ -1804,62 +1820,6 @@ def _filter_offline_presence_state( def get_current_key(self) -> int: return self.store.get_current_presence_token() - @cached(num_args=2, cache_context=True) - async def _get_interested_in( - self, - user: UserID, - explicit_room_id: Optional[str] = None, - cache_context: Optional[_CacheContext] = None, - ) -> Union[Set[str], str]: - """Returns the set of users that the given user should see presence - updates for. - - Args: - user: The user to retrieve presence updates for. - explicit_room_id: The users that are in the room will be returned. - - Returns: - A set of user IDs to return presence updates for, or "ALL" to return all - known updates. - """ - user_id = user.to_string() - users_interested_in = set() - users_interested_in.add(user_id) # So that we receive our own presence - - # cache_context isn't likely to ever be None due to the @cached decorator, - # but we can't have a non-optional argument after the optional argument - # explicit_room_id either. Assert cache_context is not None so we can use it - # without mypy complaining. 
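The rewritten presence code above builds the set of users to return updates for from the presence router's explicit list, the requesting user themselves, members of any explicitly requested room, and users sharing a room with the requester, then intersects that with the users whose presence actually changed. A plain-set sketch of that logic (the function and argument names are illustrative only):

```python
from typing import Set

def interested_and_updated(
    user_id: str,
    additional_interested: Set[str],   # from the presence router
    explicit_room_members: Set[str],   # members of explicit_room_id, if any
    sharing_users: Set[str],           # users who share a room with user_id
    updated_users: Set[str],           # users whose presence changed since from_key
) -> Set[str]:
    # We always care about our own presence, plus anything the router asked for
    # and anyone in an explicitly requested room.
    interested = additional_interested | {user_id} | explicit_room_members
    # Only return users who are both interesting to us and actually updated.
    return (sharing_users | interested) & updated_users
```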
- assert cache_context - - # Check with the presence router whether we should poll additional users for - # their presence information - additional_users = await self.get_presence_router().get_interested_users( - user.to_string() - ) - if additional_users == PresenceRouter.ALL_USERS: - # If the module requested that this user see the presence updates of *all* - # users, then simply return that instead of calculating what rooms this - # user shares - return PresenceRouter.ALL_USERS - - # Add the additional users from the router - users_interested_in.update(additional_users) - - # Find the users who share a room with this user - users_who_share_room = await self.store.get_users_who_share_room_with_user( - user_id, on_invalidate=cache_context.invalidate - ) - users_interested_in.update(users_who_share_room) - - if explicit_room_id: - user_ids = await self.store.get_users_in_room( - explicit_room_id, on_invalidate=cache_context.invalidate - ) - users_interested_in.update(user_ids) - - return users_interested_in - def handle_timeouts( user_states: List[UserPresenceState], diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index a54f163c0a41..55395457c3d1 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -889,7 +889,11 @@ async def create_room( # override any attempt to set room versions via the creation_content creation_content["room_version"] = room_version.identifier - last_stream_id = await self._send_events_for_new_room( + ( + last_stream_id, + last_sent_event_id, + depth, + ) = await self._send_events_for_new_room( requester, room_id, preset_config=preset_config, @@ -905,7 +909,7 @@ async def create_room( if "name" in config: name = config["name"] ( - _, + name_event, last_stream_id, ) = await self.event_creation_handler.create_and_send_nonmember_event( requester, @@ -917,12 +921,16 @@ async def create_room( "content": {"name": name}, }, ratelimit=False, + prev_event_ids=[last_sent_event_id], + depth=depth, ) + last_sent_event_id = name_event.event_id + depth += 1 if "topic" in config: topic = config["topic"] ( - _, + topic_event, last_stream_id, ) = await self.event_creation_handler.create_and_send_nonmember_event( requester, @@ -934,7 +942,11 @@ async def create_room( "content": {"topic": topic}, }, ratelimit=False, + prev_event_ids=[last_sent_event_id], + depth=depth, ) + last_sent_event_id = topic_event.event_id + depth += 1 # we avoid dropping the lock between invites, as otherwise joins can # start coming in and making the createRoom slow. @@ -949,7 +961,7 @@ async def create_room( for invitee in invite_list: ( - _, + member_event_id, last_stream_id, ) = await self.room_member_handler.update_membership_locked( requester, @@ -959,7 +971,11 @@ async def create_room( ratelimit=False, content=content, new_room=True, + prev_event_ids=[last_sent_event_id], + depth=depth, ) + last_sent_event_id = member_event_id + depth += 1 for invite_3pid in invite_3pid_list: id_server = invite_3pid["id_server"] @@ -968,7 +984,10 @@ async def create_room( medium = invite_3pid["medium"] # Note that do_3pid_invite can raise a ShadowBanError, but this was # handled above by emptying invite_3pid_list. 
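The room-creation changes below and above thread `prev_event_ids` and `depth` through each initial event (name, topic, invites) so they form a simple chain in the DAG rather than each one re-reading the room's forward extremities. A hedged sketch of that chaining pattern, with `send_event` standing in for the real event-creation calls:

```python
from typing import Awaitable, Callable, List, Optional

async def send_initial_events(
    contents: List[dict],
    send_event: Callable[..., Awaitable[str]],  # returns the new event ID
) -> Optional[str]:
    last_sent_event_id: Optional[str] = None
    depth = 1
    for content in contents:
        last_sent_event_id = await send_event(
            content,
            # Explicitly chain each event off the previous one so the initial
            # events form a straight line in the DAG.
            prev_event_ids=[last_sent_event_id] if last_sent_event_id else [],
            depth=depth,
        )
        depth += 1
    return last_sent_event_id
```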
- last_stream_id = await self.hs.get_room_member_handler().do_3pid_invite( + ( + member_event_id, + last_stream_id, + ) = await self.hs.get_room_member_handler().do_3pid_invite( room_id, requester.user, medium, @@ -977,7 +996,11 @@ async def create_room( requester, txn_id=None, id_access_token=id_access_token, + prev_event_ids=[last_sent_event_id], + depth=depth, ) + last_sent_event_id = member_event_id + depth += 1 result = {"room_id": room_id} @@ -1005,20 +1028,22 @@ async def _send_events_for_new_room( power_level_content_override: Optional[JsonDict] = None, creator_join_profile: Optional[JsonDict] = None, ratelimit: bool = True, - ) -> int: + ) -> Tuple[int, str, int]: """Sends the initial events into a new room. `power_level_content_override` doesn't apply when initial state has power level state event content. Returns: - The stream_id of the last event persisted. + A tuple containing the stream ID, event ID and depth of the last + event sent to the room. """ creator_id = creator.user.to_string() event_keys = {"room_id": room_id, "sender": creator_id, "state_key": ""} + depth = 1 last_sent_event_id: Optional[str] = None def create(etype: str, content: JsonDict, **kwargs: Any) -> JsonDict: @@ -1031,6 +1056,7 @@ def create(etype: str, content: JsonDict, **kwargs: Any) -> JsonDict: async def send(etype: str, content: JsonDict, **kwargs: Any) -> int: nonlocal last_sent_event_id + nonlocal depth event = create(etype, content, **kwargs) logger.debug("Sending %s in new room", etype) @@ -1047,9 +1073,11 @@ async def send(etype: str, content: JsonDict, **kwargs: Any) -> int: # Note: we don't pass state_event_ids here because this triggers # an additional query per event to look them up from the events table. prev_event_ids=[last_sent_event_id] if last_sent_event_id else [], + depth=depth, ) last_sent_event_id = sent_event.event_id + depth += 1 return last_stream_id @@ -1075,6 +1103,7 @@ async def send(etype: str, content: JsonDict, **kwargs: Any) -> int: content=creator_join_profile, new_room=True, prev_event_ids=[last_sent_event_id], + depth=depth, ) last_sent_event_id = member_event_id @@ -1168,7 +1197,7 @@ async def send(etype: str, content: JsonDict, **kwargs: Any) -> int: content={"algorithm": RoomEncryptionAlgorithms.DEFAULT}, ) - return last_sent_stream_id + return last_sent_stream_id, last_sent_event_id, depth def _generate_room_id(self) -> str: """Generates a random room ID. @@ -1355,6 +1384,7 @@ def __init__(self, hs: "HomeServer"): self.store = hs.get_datastores().main self.state_handler = hs.get_state_handler() self.federation_client = hs.get_federation_client() + self.federation_event_handler = hs.get_federation_event_handler() self._storage_controllers = hs.get_storage_controllers() async def get_event_for_timestamp( @@ -1450,38 +1480,68 @@ async def get_event_for_timestamp( remote_response, ) - # TODO: Do we want to persist this as an extremity? - # TODO: I think ideally, we would try to backfill from - # this event and run this whole - # `get_event_for_timestamp` function again to make sure - # they didn't give us an event from their gappy history. remote_event_id = remote_response.event_id - origin_server_ts = remote_response.origin_server_ts + remote_origin_server_ts = remote_response.origin_server_ts + + # Backfill this event so we can get a pagination token for + # it with `/context` and paginate `/messages` from this + # point. 
+ # + # TODO: The requested timestamp may lie in a part of the + # event graph that the remote server *also* didn't have, + # in which case they will have returned another event + # which may be nowhere near the requested timestamp. In + # the future, we may need to reconcile that gap and ask + # other homeservers, and/or extend `/timestamp_to_event` + # to return events on *both* sides of the timestamp to + # help reconcile the gap faster. + remote_event = ( + await self.federation_event_handler.backfill_event_id( + domain, room_id, remote_event_id + ) + ) + + # XXX: When we see that the remote server is not trustworthy, + # maybe we should not ask them first in the future. + if remote_origin_server_ts != remote_event.origin_server_ts: + logger.info( + "get_event_for_timestamp: Remote server (%s) claimed that remote_event_id=%s occured at remote_origin_server_ts=%s but that isn't true (actually occured at %s). Their claims are dubious and we should consider not trusting them.", + domain, + remote_event_id, + remote_origin_server_ts, + remote_event.origin_server_ts, + ) # Only return the remote event if it's closer than the local event if not local_event or ( - abs(origin_server_ts - timestamp) + abs(remote_event.origin_server_ts - timestamp) < abs(local_event.origin_server_ts - timestamp) ): - return remote_event_id, origin_server_ts + logger.info( + "get_event_for_timestamp: returning remote_event_id=%s (%s) since it's closer to timestamp=%s than local_event=%s (%s)", + remote_event_id, + remote_event.origin_server_ts, + timestamp, + local_event.event_id if local_event else None, + local_event.origin_server_ts if local_event else None, + ) + return remote_event_id, remote_origin_server_ts except (HttpResponseException, InvalidResponseError) as ex: # Let's not put a high priority on some other homeserver # failing to respond or giving a random response logger.debug( - "Failed to fetch /timestamp_to_event from %s because of exception(%s) %s args=%s", + "get_event_for_timestamp: Failed to fetch /timestamp_to_event from %s because of exception(%s) %s args=%s", domain, type(ex).__name__, ex, ex.args, ) - except Exception as ex: + except Exception: # But we do want to see some exceptions in our code logger.warning( - "Failed to fetch /timestamp_to_event from %s because of exception(%s) %s args=%s", + "get_event_for_timestamp: Failed to fetch /timestamp_to_event from %s because of exception", domain, - type(ex).__name__, - ex, - ex.args, + exc_info=True, ) # To appease mypy, we have to add both of these conditions to check for diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 04c44b2ccb2d..30b4cb23df3a 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -94,12 +94,29 @@ def __init__(self, hs: "HomeServer"): rate_hz=hs.config.ratelimiting.rc_joins_local.per_second, burst_count=hs.config.ratelimiting.rc_joins_local.burst_count, ) + # Tracks joins from local users to rooms this server isn't a member of. + # I.e. joins this server makes by requesting /make_join /send_join from + # another server. self._join_rate_limiter_remote = Ratelimiter( store=self.store, clock=self.clock, rate_hz=hs.config.ratelimiting.rc_joins_remote.per_second, burst_count=hs.config.ratelimiting.rc_joins_remote.burst_count, ) + # TODO: find a better place to keep this Ratelimiter. 
+ # It needs to be + # - written to by event persistence code + # - written to by something which can snoop on replication streams + # - read by the RoomMemberHandler to rate limit joins from local users + # - read by the FederationServer to rate limit make_joins and send_joins from + # other homeservers + # I wonder if a homeserver-wide collection of rate limiters might be cleaner? + self._join_rate_per_room_limiter = Ratelimiter( + store=self.store, + clock=self.clock, + rate_hz=hs.config.ratelimiting.rc_joins_per_room.per_second, + burst_count=hs.config.ratelimiting.rc_joins_per_room.burst_count, + ) # Ratelimiter for invites, keyed by room (across all issuers, all # recipients). @@ -136,6 +153,18 @@ def __init__(self, hs: "HomeServer"): ) self.request_ratelimiter = hs.get_request_ratelimiter() + hs.get_notifier().add_new_join_in_room_callback(self._on_user_joined_room) + + def _on_user_joined_room(self, event_id: str, room_id: str) -> None: + """Notify the rate limiter that a room join has occurred. + + Use this to inform the RoomMemberHandler about joins that have either + - taken place on another homeserver, or + - on another worker in this homeserver. + Joins actioned by this worker should use the usual `ratelimit` method, which + checks the limit and increments the counter in one go. + """ + self._join_rate_per_room_limiter.record_action(requester=None, key=room_id) @abc.abstractmethod async def _remote_join( @@ -285,6 +314,7 @@ async def _local_membership_update( allow_no_prev_events: bool = False, prev_event_ids: Optional[List[str]] = None, state_event_ids: Optional[List[str]] = None, + depth: Optional[int] = None, txn_id: Optional[str] = None, ratelimit: bool = True, content: Optional[dict] = None, @@ -315,6 +345,9 @@ async def _local_membership_update( prev_events are set so we need to set them ourself via this argument. This should normally be left as None, which will cause the auth_event_ids to be calculated based on the room state at the prev_events. + depth: Override the depth used to order the event in the DAG. + Should normally be set to None, which will cause the depth to be calculated + based on the prev_events. txn_id: ratelimit: @@ -370,6 +403,7 @@ async def _local_membership_update( allow_no_prev_events=allow_no_prev_events, prev_event_ids=prev_event_ids, state_event_ids=state_event_ids, + depth=depth, require_consent=require_consent, outlier=outlier, historical=historical, @@ -391,6 +425,9 @@ async def _local_membership_update( # up blocking profile updates. if newly_joined and ratelimit: await self._join_rate_limiter_local.ratelimit(requester) + await self._join_rate_per_room_limiter.ratelimit( + requester, key=room_id, update=False + ) result_event = await self.event_creation_handler.handle_new_client_event( requester, @@ -466,6 +503,7 @@ async def update_membership( allow_no_prev_events: bool = False, prev_event_ids: Optional[List[str]] = None, state_event_ids: Optional[List[str]] = None, + depth: Optional[int] = None, ) -> Tuple[str, int]: """Update a user's membership in a room. @@ -501,6 +539,9 @@ async def update_membership( prev_events are set so we need to set them ourself via this argument. This should normally be left as None, which will cause the auth_event_ids to be calculated based on the room state at the prev_events. + depth: Override the depth used to order the event in the DAG. + Should normally be set to None, which will cause the depth to be calculated + based on the prev_events. Returns: A tuple of the new event ID and stream ID. 
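The per-room join limiter described above is used in two distinct ways: `record_action` counts joins this worker merely observes (performed by other workers or other homeservers), while the local join path checks the limit with `update=False` and relies on the persistence path to do the counting. The toy sliding-window limiter below only illustrates that split; it is not Synapse's `Ratelimiter`:

```python
import time
from collections import deque
from typing import Deque, Dict

class PerRoomJoinLimiter:
    """Toy sliding-window limiter showing the split between 'record without
    checking' and 'check without recording'."""

    def __init__(self, burst_count: int, window_seconds: float) -> None:
        self._burst_count = burst_count
        self._window = window_seconds
        self._joins: Dict[str, Deque[float]] = {}

    def _prune(self, room_id: str, now: float) -> Deque[float]:
        joins = self._joins.setdefault(room_id, deque())
        while joins and now - joins[0] > self._window:
            joins.popleft()
        return joins

    def record_action(self, room_id: str) -> None:
        # Joins we merely observe: count them, never reject them.
        now = time.monotonic()
        self._prune(room_id, now).append(now)

    def check(self, room_id: str) -> bool:
        # Joins this worker is about to perform: check only, and let the
        # persistence path record the action, so each join is counted once.
        return len(self._prune(room_id, time.monotonic())) < self._burst_count
```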
@@ -540,6 +581,7 @@ async def update_membership( allow_no_prev_events=allow_no_prev_events, prev_event_ids=prev_event_ids, state_event_ids=state_event_ids, + depth=depth, ) return result @@ -562,6 +604,7 @@ async def update_membership_locked( allow_no_prev_events: bool = False, prev_event_ids: Optional[List[str]] = None, state_event_ids: Optional[List[str]] = None, + depth: Optional[int] = None, ) -> Tuple[str, int]: """Helper for update_membership. @@ -599,6 +642,9 @@ async def update_membership_locked( prev_events are set so we need to set them ourself via this argument. This should normally be left as None, which will cause the auth_event_ids to be calculated based on the room state at the prev_events. + depth: Override the depth used to order the event in the DAG. + Should normally be set to None, which will cause the depth to be calculated + based on the prev_events. Returns: A tuple of the new event ID and stream ID. @@ -732,6 +778,7 @@ async def update_membership_locked( allow_no_prev_events=allow_no_prev_events, prev_event_ids=prev_event_ids, state_event_ids=state_event_ids, + depth=depth, content=content, require_consent=require_consent, outlier=outlier, @@ -740,14 +787,14 @@ async def update_membership_locked( latest_event_ids = await self.store.get_prev_events_for_room(room_id) - current_state_ids = await self.state_handler.get_current_state_ids( - room_id, latest_event_ids=latest_event_ids + state_before_join = await self.state_handler.compute_state_after_events( + room_id, latest_event_ids ) # TODO: Refactor into dictionary of explicitly allowed transitions # between old and new state, with specific error messages for some # transitions and generic otherwise - old_state_id = current_state_ids.get((EventTypes.Member, target.to_string())) + old_state_id = state_before_join.get((EventTypes.Member, target.to_string())) if old_state_id: old_state = await self.store.get_event(old_state_id, allow_none=True) old_membership = old_state.content.get("membership") if old_state else None @@ -798,11 +845,11 @@ async def update_membership_locked( if action == "kick": raise AuthError(403, "The target user is not in the room") - is_host_in_room = await self._is_host_in_room(current_state_ids) + is_host_in_room = await self._is_host_in_room(state_before_join) if effective_membership_state == Membership.JOIN: if requester.is_guest: - guest_can_join = await self._can_guest_join(current_state_ids) + guest_can_join = await self._can_guest_join(state_before_join) if not guest_can_join: # This should be an auth check, but guests are a local concept, # so don't really fit into the general auth process. @@ -840,13 +887,23 @@ async def update_membership_locked( # Check if a remote join should be performed. 
remote_join, remote_room_hosts = await self._should_perform_remote_join( - target.to_string(), room_id, remote_room_hosts, content, is_host_in_room + target.to_string(), + room_id, + remote_room_hosts, + content, + is_host_in_room, + state_before_join, ) if remote_join: if ratelimit: await self._join_rate_limiter_remote.ratelimit( requester, ) + await self._join_rate_per_room_limiter.ratelimit( + requester, + key=room_id, + update=False, + ) inviter = await self._get_inviter(target.to_string(), room_id) if inviter and not self.hs.is_mine(inviter): @@ -967,6 +1024,7 @@ async def update_membership_locked( ratelimit=ratelimit, prev_event_ids=latest_event_ids, state_event_ids=state_event_ids, + depth=depth, content=content, require_consent=require_consent, outlier=outlier, @@ -979,6 +1037,7 @@ async def _should_perform_remote_join( remote_room_hosts: List[str], content: JsonDict, is_host_in_room: bool, + state_before_join: StateMap[str], ) -> Tuple[bool, List[str]]: """ Check whether the server should do a remote join (as opposed to a local @@ -998,6 +1057,8 @@ async def _should_perform_remote_join( content: The content to use as the event body of the join. This may be modified. is_host_in_room: True if the host is in the room. + state_before_join: The state before the join event (i.e. the resolution of + the states after its parent events). Returns: A tuple of: @@ -1014,20 +1075,17 @@ async def _should_perform_remote_join( # If the host is in the room, but not one of the authorised hosts # for restricted join rules, a remote join must be used. room_version = await self.store.get_room_version(room_id) - current_state_ids = await self._storage_controllers.state.get_current_state_ids( - room_id - ) # If restricted join rules are not being used, a local join can always # be used. if not await self.event_auth_handler.has_restricted_join_rules( - current_state_ids, room_version + state_before_join, room_version ): return False, [] # If the user is invited to the room or already joined, the join # event can always be issued locally. - prev_member_event_id = current_state_ids.get((EventTypes.Member, user_id), None) + prev_member_event_id = state_before_join.get((EventTypes.Member, user_id), None) prev_member_event = None if prev_member_event_id: prev_member_event = await self.store.get_event(prev_member_event_id) @@ -1042,10 +1100,10 @@ async def _should_perform_remote_join( # # If not, generate a new list of remote hosts based on which # can issue invites. - event_map = await self.store.get_events(current_state_ids.values()) + event_map = await self.store.get_events(state_before_join.values()) current_state = { state_key: event_map[event_id] - for state_key, event_id in current_state_ids.items() + for state_key, event_id in state_before_join.items() } allowed_servers = get_servers_from_users( get_users_which_can_issue_invite(current_state) @@ -1059,7 +1117,7 @@ async def _should_perform_remote_join( # Ensure the member should be allowed access via membership in a room. 
await self.event_auth_handler.check_restricted_join_rules( - current_state_ids, room_version, user_id, prev_member_event + state_before_join, room_version, user_id, prev_member_event ) # If this is going to be a local join, additional information must @@ -1069,7 +1127,7 @@ async def _should_perform_remote_join( EventContentFields.AUTHORISING_USER ] = await self.event_auth_handler.get_user_which_could_invite( room_id, - current_state_ids, + state_before_join, ) return False, [] @@ -1322,7 +1380,9 @@ async def do_3pid_invite( requester: Requester, txn_id: Optional[str], id_access_token: Optional[str] = None, - ) -> int: + prev_event_ids: Optional[List[str]] = None, + depth: Optional[int] = None, + ) -> Tuple[str, int]: """Invite a 3PID to a room. Args: @@ -1335,9 +1395,13 @@ async def do_3pid_invite( txn_id: The transaction ID this is part of, or None if this is not part of a transaction. id_access_token: The optional identity server access token. + depth: Override the depth used to order the event in the DAG. + prev_event_ids: The event IDs to use as the prev events + Should normally be set to None, which will cause the depth to be calculated + based on the prev_events. Returns: - The new stream ID. + Tuple of event ID and stream ordering position Raises: ShadowBanError if the requester has been shadow-banned. @@ -1383,7 +1447,7 @@ async def do_3pid_invite( # We don't check the invite against the spamchecker(s) here (through # user_may_invite) because we'll do it further down the line anyway (in # update_membership_locked). - _, stream_id = await self.update_membership( + event_id, stream_id = await self.update_membership( requester, UserID.from_string(invitee), room_id, "invite", txn_id=txn_id ) else: @@ -1402,7 +1466,7 @@ async def do_3pid_invite( additional_fields=spam_check[1], ) - stream_id = await self._make_and_store_3pid_invite( + event, stream_id = await self._make_and_store_3pid_invite( requester, id_server, medium, @@ -1411,9 +1475,12 @@ async def do_3pid_invite( inviter, txn_id=txn_id, id_access_token=id_access_token, + prev_event_ids=prev_event_ids, + depth=depth, ) + event_id = event.event_id - return stream_id + return event_id, stream_id async def _make_and_store_3pid_invite( self, @@ -1425,7 +1492,9 @@ async def _make_and_store_3pid_invite( user: UserID, txn_id: Optional[str], id_access_token: Optional[str] = None, - ) -> int: + prev_event_ids: Optional[List[str]] = None, + depth: Optional[int] = None, + ) -> Tuple[EventBase, int]: room_state = await self._storage_controllers.state.get_current_state( room_id, StateFilter.from_types( @@ -1518,8 +1587,10 @@ async def _make_and_store_3pid_invite( }, ratelimit=False, txn_id=txn_id, + prev_event_ids=prev_event_ids, + depth=depth, ) - return stream_id + return event, stream_id async def _is_host_in_room(self, current_state_ids: StateMap[str]) -> bool: # Have we just created the room, and is this about to be the very diff --git a/synapse/handlers/send_email.py b/synapse/handlers/send_email.py index a305a66860a5..e2844799e886 100644 --- a/synapse/handlers/send_email.py +++ b/synapse/handlers/send_email.py @@ -23,10 +23,12 @@ import twisted from twisted.internet.defer import Deferred -from twisted.internet.interfaces import IOpenSSLContextFactory, IReactorTCP +from twisted.internet.interfaces import IOpenSSLContextFactory +from twisted.internet.ssl import optionsForClientTLS from twisted.mail.smtp import ESMTPSender, ESMTPSenderFactory from synapse.logging.context import make_deferred_yieldable +from synapse.types import 
ISynapseReactor if TYPE_CHECKING: from synapse.server import HomeServer @@ -48,7 +50,7 @@ def _getContextFactory(self) -> Optional[IOpenSSLContextFactory]: async def _sendmail( - reactor: IReactorTCP, + reactor: ISynapseReactor, smtphost: str, smtpport: int, from_addr: str, @@ -59,6 +61,7 @@ async def _sendmail( require_auth: bool = False, require_tls: bool = False, enable_tls: bool = True, + force_tls: bool = False, ) -> None: """A simple wrapper around ESMTPSenderFactory, to allow substitution in tests @@ -73,8 +76,9 @@ async def _sendmail( password: password to give when authenticating require_auth: if auth is not offered, fail the request require_tls: if TLS is not offered, fail the reqest - enable_tls: True to enable TLS. If this is False and require_tls is True, + enable_tls: True to enable STARTTLS. If this is False and require_tls is True, the request will fail. + force_tls: True to enable Implicit TLS. """ msg = BytesIO(msg_bytes) d: "Deferred[object]" = Deferred() @@ -105,13 +109,23 @@ def build_sender_factory(**kwargs: Any) -> ESMTPSenderFactory: # set to enable TLS. factory = build_sender_factory(hostname=smtphost if enable_tls else None) - reactor.connectTCP( - smtphost, - smtpport, - factory, - timeout=30, - bindAddress=None, - ) + if force_tls: + reactor.connectSSL( + smtphost, + smtpport, + factory, + optionsForClientTLS(smtphost), + timeout=30, + bindAddress=None, + ) + else: + reactor.connectTCP( + smtphost, + smtpport, + factory, + timeout=30, + bindAddress=None, + ) await make_deferred_yieldable(d) @@ -132,6 +146,7 @@ def __init__(self, hs: "HomeServer"): self._smtp_pass = passwd.encode("utf-8") if passwd is not None else None self._require_transport_security = hs.config.email.require_transport_security self._enable_tls = hs.config.email.enable_smtp_tls + self._force_tls = hs.config.email.force_tls self._sendmail = _sendmail @@ -189,4 +204,5 @@ async def send_email( require_auth=self._smtp_user is not None, require_tls=self._require_transport_security, enable_tls=self._enable_tls, + force_tls=self._force_tls, ) diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index c63d068f74c6..3c35b1d2c7af 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -79,6 +79,7 @@ from synapse.util import json_decoder from synapse.util.async_helpers import AwakenableSleeper, timeout_deferred from synapse.util.metrics import Measure +from synapse.util.stringutils import parse_and_validate_server_name if TYPE_CHECKING: from synapse.server import HomeServer @@ -479,6 +480,14 @@ async def _send_request( RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. 
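Returning to the `send_email.py` hunk above: `force_tls` selects implicit TLS (the TCP connection is wrapped in TLS from the start, conventionally port 465) instead of a STARTTLS upgrade on a plaintext connection. A hypothetical helper summarising how the three settings interact, consistent with the docstring above but not part of Synapse:

```python
from typing import Literal

def smtp_connection_mode(
    enable_tls: bool, require_tls: bool, force_tls: bool
) -> Literal["implicit-tls", "starttls", "plaintext"]:
    if force_tls:
        # Implicit TLS: wrap the connection in TLS immediately (typically port 465).
        return "implicit-tls"
    if enable_tls:
        # STARTTLS: connect in plaintext, then upgrade (typically port 587).
        return "starttls"
    if require_tls:
        # Mirrors the documented behaviour: require_tls cannot be satisfied
        # when STARTTLS is disabled and implicit TLS is not forced.
        raise ValueError("TLS required but neither STARTTLS nor implicit TLS enabled")
    return "plaintext"
```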
""" + # Validate server name and log if it is an invalid destination, this is + # partially to help track down code paths where we haven't validated before here + try: + parse_and_validate_server_name(request.destination) + except ValueError: + logger.exception(f"Invalid destination: {request.destination}.") + raise FederationDeniedError(request.destination) + if timeout: _sec_timeout = timeout / 1000 else: diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 50c57940f94e..c1aa205eedde 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -84,14 +84,13 @@ def interesting_function(*args, **kwargs): return something_usual_and_useful -Operation names can be explicitly set for a function by passing the -operation name to ``trace`` +Operation names can be explicitly set for a function by using ``trace_with_opname``: .. code-block:: python - from synapse.logging.opentracing import trace + from synapse.logging.opentracing import trace_with_opname - @trace(opname="a_better_operation_name") + @trace_with_opname("a_better_operation_name") def interesting_badly_named_function(*args, **kwargs): # Does all kinds of cool and expected things return something_usual_and_useful @@ -183,6 +182,8 @@ def set_fates(clotho, lachesis, atropos, father="Zues", mother="Themis"): Type, TypeVar, Union, + cast, + overload, ) import attr @@ -329,6 +330,7 @@ class _Sentinel(enum.Enum): P = ParamSpec("P") R = TypeVar("R") +T = TypeVar("T") def only_if_tracing(func: Callable[P, R]) -> Callable[P, Optional[R]]: @@ -344,22 +346,43 @@ def _only_if_tracing_inner(*args: P.args, **kwargs: P.kwargs) -> Optional[R]: return _only_if_tracing_inner -def ensure_active_span(message: str, ret=None): +@overload +def ensure_active_span( + message: str, +) -> Callable[[Callable[P, R]], Callable[P, Optional[R]]]: + ... + + +@overload +def ensure_active_span( + message: str, ret: T +) -> Callable[[Callable[P, R]], Callable[P, Union[T, R]]]: + ... + + +def ensure_active_span( + message: str, ret: Optional[T] = None +) -> Callable[[Callable[P, R]], Callable[P, Union[Optional[T], R]]]: """Executes the operation only if opentracing is enabled and there is an active span. If there is no active span it logs message at the error level. Args: message: Message which fills in "There was no active span when trying to %s" in the error log if there is no active span and opentracing is enabled. - ret (object): return value if opentracing is None or there is no active span. + ret: return value if opentracing is None or there is no active span. - Returns (object): The result of the func or ret if opentracing is disabled or there + Returns: + The result of the func, falling back to ret if opentracing is disabled or there was no active span. """ - def ensure_active_span_inner_1(func): + def ensure_active_span_inner_1( + func: Callable[P, R] + ) -> Callable[P, Union[Optional[T], R]]: @wraps(func) - def ensure_active_span_inner_2(*args, **kwargs): + def ensure_active_span_inner_2( + *args: P.args, **kwargs: P.kwargs + ) -> Union[Optional[T], R]: if not opentracing: return ret @@ -465,7 +488,7 @@ def start_active_span( finish_on_close: bool = True, *, tracer: Optional["opentracing.Tracer"] = None, -): +) -> "opentracing.Scope": """Starts an active opentracing span. 
Records the start time for the span, and sets it as the "active span" in the @@ -503,7 +526,7 @@ def start_active_span_follows_from( *, inherit_force_tracing: bool = False, tracer: Optional["opentracing.Tracer"] = None, -): +) -> "opentracing.Scope": """Starts an active opentracing span, with additional references to previous spans Args: @@ -718,7 +741,9 @@ def inject_response_headers(response_headers: Headers) -> None: response_headers.addRawHeader("Synapse-Trace-Id", f"{trace_id:x}") -@ensure_active_span("get the active span context as a dict", ret={}) +@ensure_active_span( + "get the active span context as a dict", ret=cast(Dict[str, str], {}) +) def get_active_span_text_map(destination: Optional[str] = None) -> Dict[str, str]: """ Gets a span context as a dict. This can be used instead of manually @@ -798,33 +823,31 @@ def extract_text_map(carrier: Dict[str, str]) -> Optional["opentracing.SpanConte # Tracing decorators -def trace(func=None, opname: Optional[str] = None): +def trace_with_opname(opname: str) -> Callable[[Callable[P, R]], Callable[P, R]]: """ - Decorator to trace a function. - Sets the operation name to that of the function's or that given - as operation_name. See the module's doc string for usage - examples. + Decorator to trace a function with a custom opname. + + See the module's doc string for usage examples. + """ - def decorator(func): + def decorator(func: Callable[P, R]) -> Callable[P, R]: if opentracing is None: return func # type: ignore[unreachable] - _opname = opname if opname else func.__name__ - if inspect.iscoroutinefunction(func): @wraps(func) - async def _trace_inner(*args, **kwargs): - with start_active_span(_opname): - return await func(*args, **kwargs) + async def _trace_inner(*args: P.args, **kwargs: P.kwargs) -> R: + with start_active_span(opname): + return await func(*args, **kwargs) # type: ignore[misc] else: # The other case here handles both sync functions and those # decorated with inlineDeferred. @wraps(func) - def _trace_inner(*args, **kwargs): - scope = start_active_span(_opname) + def _trace_inner(*args: P.args, **kwargs: P.kwargs) -> R: + scope = start_active_span(opname) scope.__enter__() try: @@ -858,12 +881,21 @@ def err_back(result: R) -> R: scope.__exit__(type(e), None, e.__traceback__) raise - return _trace_inner + return _trace_inner # type: ignore[return-value] - if func: - return decorator(func) - else: - return decorator + return decorator + + +def trace(func: Callable[P, R]) -> Callable[P, R]: + """ + Decorator to trace a function. + + Sets the operation name to that of the function's name. + + See the module's doc string for usage examples. 
+ """ + + return trace_with_opname(func.__name__)(func) def tag_args(func: Callable[P, R]) -> Callable[P, R]: @@ -878,9 +910,9 @@ def tag_args(func: Callable[P, R]) -> Callable[P, R]: def _tag_args_inner(*args: P.args, **kwargs: P.kwargs) -> R: argspec = inspect.getfullargspec(func) for i, arg in enumerate(argspec.args[1:]): - set_tag("ARG_" + arg, args[i]) # type: ignore[index] - set_tag("args", args[len(argspec.args) :]) # type: ignore[index] - set_tag("kwargs", kwargs) + set_tag("ARG_" + arg, str(args[i])) # type: ignore[index] + set_tag("args", str(args[len(argspec.args) :])) # type: ignore[index] + set_tag("kwargs", str(kwargs)) return func(*args, **kwargs) return _tag_args_inner diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index eef3462e1053..7a1516d3a89c 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -235,7 +235,7 @@ async def run() -> Optional[R]: f"bgproc.{desc}", tags={SynapseTags.REQUEST_ID: str(context)} ) else: - ctx = nullcontext() + ctx = nullcontext() # type: ignore[assignment] with ctx: return await func(*args, **kwargs) except Exception: diff --git a/synapse/notifier.py b/synapse/notifier.py index 54b0ec4b97b4..c42bb8266add 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -228,6 +228,7 @@ def __init__(self, hs: "HomeServer"): # Called when there are new things to stream over replication self.replication_callbacks: List[Callable[[], None]] = [] + self._new_join_in_room_callbacks: List[Callable[[str, str], None]] = [] self._federation_client = hs.get_federation_http_client() @@ -280,6 +281,19 @@ def add_replication_callback(self, cb: Callable[[], None]) -> None: """ self.replication_callbacks.append(cb) + def add_new_join_in_room_callback(self, cb: Callable[[str, str], None]) -> None: + """Add a callback that will be called when a user joins a room. + + This only fires on genuine membership changes, e.g. "invite" -> "join". + Membership transitions like "join" -> "join" (for e.g. displayname changes) do + not trigger the callback. + + When called, the callback receives two arguments: the event ID and the room ID. + It should *not* return a Deferred - if it needs to do any asynchronous work, a + background thread should be started and wrapped with run_as_background_process. + """ + self._new_join_in_room_callbacks.append(cb) + async def on_new_room_event( self, event: EventBase, @@ -723,6 +737,10 @@ def notify_replication(self) -> None: for cb in self.replication_callbacks: cb() + def notify_user_joined_room(self, event_id: str, room_id: str) -> None: + for cb in self._new_join_in_room_callbacks: + cb(event_id, room_id) + def notify_remote_server_up(self, server: str) -> None: """Notify any replication that a remote server has come back up""" # We call federation_sender directly rather than registering as a diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index e581af9a9af3..713dcf69504b 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -131,6 +131,13 @@ async def _get_rules_for_event( local_users = await self.store.get_local_users_in_room(event.room_id) + # Filter out appservice users. 
+ local_users = [ + u + for u in local_users + if not self.store.get_if_app_services_interested_in_user(u) + ] + # if this event is an invite event, we may need to run rules for the user # who's been invited, otherwise they won't get told they've been invited if event.type == EventTypes.Member and event.membership == Membership.INVITE: diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index d0cc657b4464..1e0ef44fc786 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -328,7 +328,7 @@ async def _start_pusher(self, pusher_config: PusherConfig) -> Optional[Pusher]: return None try: - p = self.pusher_factory.create_pusher(pusher_config) + pusher = self.pusher_factory.create_pusher(pusher_config) except PusherConfigException as e: logger.warning( "Pusher incorrectly configured id=%i, user=%s, appid=%s, pushkey=%s: %s", @@ -346,23 +346,28 @@ async def _start_pusher(self, pusher_config: PusherConfig) -> Optional[Pusher]: ) return None - if not p: + if not pusher: return None - appid_pushkey = "%s:%s" % (pusher_config.app_id, pusher_config.pushkey) + appid_pushkey = "%s:%s" % (pusher.app_id, pusher.pushkey) - byuser = self.pushers.setdefault(pusher_config.user_name, {}) + byuser = self.pushers.setdefault(pusher.user_id, {}) if appid_pushkey in byuser: - byuser[appid_pushkey].on_stop() - byuser[appid_pushkey] = p + previous_pusher = byuser[appid_pushkey] + previous_pusher.on_stop() - synapse_pushers.labels(type(p).__name__, p.app_id).inc() + synapse_pushers.labels( + type(previous_pusher).__name__, previous_pusher.app_id + ).dec() + byuser[appid_pushkey] = pusher + + synapse_pushers.labels(type(pusher).__name__, pusher.app_id).inc() # Check if there *may* be push to process. We do this as this check is a # lot cheaper to do than actually fetching the exact rows we need to # push. - user_id = pusher_config.user_name - last_stream_ordering = pusher_config.last_stream_ordering + user_id = pusher.user_id + last_stream_ordering = pusher.last_stream_ordering if last_stream_ordering: have_notifs = await self.store.get_if_maybe_push_in_range_for_user( user_id, last_stream_ordering @@ -372,9 +377,9 @@ async def _start_pusher(self, pusher_config: PusherConfig) -> Optional[Pusher]: # risk missing push. 
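The pusher-pool rewrite below keeps the `synapse_pushers` gauge balanced by decrementing the label set of the pusher being replaced before incrementing for its replacement. A minimal sketch of that bookkeeping with `prometheus_client` (the registry dict and pusher objects are stand-ins, not Synapse types):

```python
from typing import Any, Dict

from prometheus_client import Gauge

pushers_gauge = Gauge("example_pushers", "Number of active pushers", ["kind", "app_id"])

def replace_pusher(registry: Dict[str, Any], key: str, new_pusher: Any) -> None:
    """Swap a pusher in while keeping the gauge balanced: decrement for the old
    instance before incrementing for the new one, so replacements are not
    double-counted."""
    old = registry.get(key)
    if old is not None:
        old.on_stop()
        pushers_gauge.labels(type(old).__name__, old.app_id).dec()
    registry[key] = new_pusher
    pushers_gauge.labels(type(new_pusher).__name__, new_pusher.app_id).inc()
```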
have_notifs = True - p.on_started(have_notifs) + pusher.on_started(have_notifs) - return p + return pusher async def remove_pusher(self, app_id: str, pushkey: str, user_id: str) -> None: appid_pushkey = "%s:%s" % (app_id, pushkey) diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index a4ae4040c353..561ad5bf045c 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -29,7 +29,7 @@ from synapse.http.server import HttpServer, is_method_cancellable from synapse.http.site import SynapseRequest from synapse.logging import opentracing -from synapse.logging.opentracing import trace +from synapse.logging.opentracing import trace_with_opname from synapse.types import JsonDict from synapse.util.caches.response_cache import ResponseCache from synapse.util.stringutils import random_string @@ -196,7 +196,7 @@ def make_client(cls, hs: "HomeServer") -> Callable: "ascii" ) - @trace(opname="outgoing_replication_request") + @trace_with_opname("outgoing_replication_request") async def send_request(*, instance_name: str = "master", **kwargs: Any) -> Any: with outgoing_gauge.track_inprogress(): if instance_name == local_instance_name: diff --git a/synapse/replication/slave/storage/_base.py b/synapse/replication/slave/storage/_base.py deleted file mode 100644 index 7644146dbadb..000000000000 --- a/synapse/replication/slave/storage/_base.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import logging -from typing import TYPE_CHECKING, Optional - -from synapse.storage.database import DatabasePool, LoggingDatabaseConnection -from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore -from synapse.storage.engines import PostgresEngine -from synapse.storage.util.id_generators import MultiWriterIdGenerator - -if TYPE_CHECKING: - from synapse.server import HomeServer - -logger = logging.getLogger(__name__) - - -class BaseSlavedStore(CacheInvalidationWorkerStore): - def __init__( - self, - database: DatabasePool, - db_conn: LoggingDatabaseConnection, - hs: "HomeServer", - ): - super().__init__(database, db_conn, hs) - if isinstance(self.database_engine, PostgresEngine): - self._cache_id_gen: Optional[ - MultiWriterIdGenerator - ] = MultiWriterIdGenerator( - db_conn, - database, - stream_name="caches", - instance_name=hs.get_instance_name(), - tables=[ - ( - "cache_invalidation_stream_by_instance", - "instance_name", - "stream_id", - ) - ], - sequence_name="cache_invalidation_stream_seq", - writers=[], - ) - else: - self._cache_id_gen = None - - self.hs = hs diff --git a/synapse/replication/slave/storage/account_data.py b/synapse/replication/slave/storage/account_data.py deleted file mode 100644 index ee74ee7d8547..000000000000 --- a/synapse/replication/slave/storage/account_data.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from synapse.replication.slave.storage._base import BaseSlavedStore -from synapse.storage.databases.main.account_data import AccountDataWorkerStore -from synapse.storage.databases.main.tags import TagsWorkerStore - - -class SlavedAccountDataStore(TagsWorkerStore, AccountDataWorkerStore, BaseSlavedStore): - pass diff --git a/synapse/replication/slave/storage/appservice.py b/synapse/replication/slave/storage/appservice.py deleted file mode 100644 index 29f50c0add5f..000000000000 --- a/synapse/replication/slave/storage/appservice.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2015, 2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from synapse.storage.databases.main.appservice import ( - ApplicationServiceTransactionWorkerStore, - ApplicationServiceWorkerStore, -) - - -class SlavedApplicationServiceStore( - ApplicationServiceTransactionWorkerStore, ApplicationServiceWorkerStore -): - pass diff --git a/synapse/replication/slave/storage/deviceinbox.py b/synapse/replication/slave/storage/deviceinbox.py deleted file mode 100644 index e94075108465..000000000000 --- a/synapse/replication/slave/storage/deviceinbox.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from synapse.replication.slave.storage._base import BaseSlavedStore -from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore - - -class SlavedDeviceInboxStore(DeviceInboxWorkerStore, BaseSlavedStore): - pass diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py index a48cc0206944..6fcade510aac 100644 --- a/synapse/replication/slave/storage/devices.py +++ b/synapse/replication/slave/storage/devices.py @@ -14,7 +14,6 @@ from typing import TYPE_CHECKING, Any, Iterable -from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.replication.tcp.streams._base import DeviceListsStream, UserSignatureStream from synapse.storage.database import DatabasePool, LoggingDatabaseConnection @@ -24,7 +23,7 @@ from synapse.server import HomeServer -class SlavedDeviceStore(DeviceWorkerStore, BaseSlavedStore): +class SlavedDeviceStore(DeviceWorkerStore): def __init__( self, database: DatabasePool, diff --git a/synapse/replication/slave/storage/directory.py b/synapse/replication/slave/storage/directory.py deleted file mode 100644 index 71fde0c96cc6..000000000000 --- a/synapse/replication/slave/storage/directory.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2015, 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from synapse.storage.databases.main.directory import DirectoryWorkerStore - -from ._base import BaseSlavedStore - - -class DirectoryStore(DirectoryWorkerStore, BaseSlavedStore): - pass diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py index a72dad7464d7..fe47778cb127 100644 --- a/synapse/replication/slave/storage/events.py +++ b/synapse/replication/slave/storage/events.py @@ -29,8 +29,6 @@ from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore from synapse.util.caches.stream_change_cache import StreamChangeCache -from ._base import BaseSlavedStore - if TYPE_CHECKING: from synapse.server import HomeServer @@ -56,7 +54,6 @@ class SlavedEventStore( EventsWorkerStore, UserErasureWorkerStore, RelationsWorkerStore, - BaseSlavedStore, ): def __init__( self, diff --git a/synapse/replication/slave/storage/filtering.py b/synapse/replication/slave/storage/filtering.py index 4d185e2b56c7..c52679cd60f3 100644 --- a/synapse/replication/slave/storage/filtering.py +++ b/synapse/replication/slave/storage/filtering.py @@ -14,16 +14,15 @@ from typing import TYPE_CHECKING +from synapse.storage._base import SQLBaseStore from synapse.storage.database import DatabasePool, LoggingDatabaseConnection from synapse.storage.databases.main.filtering import FilteringStore -from ._base import BaseSlavedStore - if TYPE_CHECKING: from synapse.server import HomeServer -class SlavedFilteringStore(BaseSlavedStore): +class SlavedFilteringStore(SQLBaseStore): def __init__( self, database: DatabasePool, diff --git a/synapse/replication/slave/storage/profile.py b/synapse/replication/slave/storage/profile.py deleted file mode 100644 index 99f4a2264287..000000000000 --- a/synapse/replication/slave/storage/profile.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2018 New Vector Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from synapse.replication.slave.storage._base import BaseSlavedStore -from synapse.storage.databases.main.profile import ProfileWorkerStore - - -class SlavedProfileStore(ProfileWorkerStore, BaseSlavedStore): - pass diff --git a/synapse/replication/slave/storage/pushers.py b/synapse/replication/slave/storage/pushers.py index de642bba71b0..44ed20e4243e 100644 --- a/synapse/replication/slave/storage/pushers.py +++ b/synapse/replication/slave/storage/pushers.py @@ -18,14 +18,13 @@ from synapse.storage.database import DatabasePool, LoggingDatabaseConnection from synapse.storage.databases.main.pusher import PusherWorkerStore -from ._base import BaseSlavedStore from ._slaved_id_tracker import SlavedIdTracker if TYPE_CHECKING: from synapse.server import HomeServer -class SlavedPusherStore(PusherWorkerStore, BaseSlavedStore): +class SlavedPusherStore(PusherWorkerStore): def __init__( self, database: DatabasePool, diff --git a/synapse/replication/slave/storage/receipts.py b/synapse/replication/slave/storage/receipts.py deleted file mode 100644 index 3826b87decaf..000000000000 --- a/synapse/replication/slave/storage/receipts.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from synapse.storage.databases.main.receipts import ReceiptsWorkerStore - -from ._base import BaseSlavedStore - - -class SlavedReceiptsStore(ReceiptsWorkerStore, BaseSlavedStore): - pass diff --git a/synapse/replication/slave/storage/registration.py b/synapse/replication/slave/storage/registration.py deleted file mode 100644 index 5dae35a9605f..000000000000 --- a/synapse/replication/slave/storage/registration.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2015, 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from synapse.storage.databases.main.registration import RegistrationWorkerStore - -from ._base import BaseSlavedStore - - -class SlavedRegistrationStore(RegistrationWorkerStore, BaseSlavedStore): - pass diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 2f59245058e7..e4f2201c922f 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -21,7 +21,7 @@ from twisted.internet.protocol import ReconnectingClientFactory from twisted.python.failure import Failure -from synapse.api.constants import EventTypes, ReceiptTypes +from synapse.api.constants import EventTypes, Membership, ReceiptTypes from synapse.federation import send_queue from synapse.federation.sender import FederationSender from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable @@ -219,6 +219,21 @@ async def on_rdata( membership=row.data.membership, ) + # If this event is a join, make a note of it so we have an accurate + # cross-worker room rate limit. + # TODO: Erik said we should exclude rows that came from ex_outliers + # here, but I don't see how we can determine that. I guess we could + # add a flag to row.data? + if ( + row.data.type == EventTypes.Member + and row.data.membership == Membership.JOIN + and not row.data.outlier + ): + # TODO retrieve the previous state, and exclude join -> join transitions + self.notifier.notify_user_joined_room( + row.data.event_id, row.data.room_id + ) + await self._presence_handler.process_replication_rows( stream_name, instance_name, token, rows ) diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py index 26f4fa7cfd16..14b6705862ac 100644 --- a/synapse/replication/tcp/streams/events.py +++ b/synapse/replication/tcp/streams/events.py @@ -98,6 +98,7 @@ class EventsStreamEventRow(BaseEventsStreamRow): relates_to: Optional[str] membership: Optional[str] rejected: bool + outlier: bool @attr.s(slots=True, frozen=True, auto_attribs=True) diff --git a/synapse/res/templates/sso_auth_account_details.html b/synapse/res/templates/sso_auth_account_details.html index 1ba850369a32..cf72df0a2a39 100644 --- a/synapse/res/templates/sso_auth_account_details.html +++ b/synapse/res/templates/sso_auth_account_details.html @@ -138,7 +138,7 @@

Create your account

@
- +
:{{ server_name }}
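
The replication-client change earlier in this patch feeds join events observed on other workers into the notifier so that the new per-room join rate limiting stays accurate across workers (the new `outlier` field on `EventsStreamEventRow` lets the receiving worker skip outlier events). As a rough illustration of the kind of bookkeeping involved, here is a minimal sliding-window join counter; the class and parameters are hypothetical and are not Synapse's actual `Ratelimiter` API.

```python
# Illustrative sketch only: a simplified, self-contained per-room join rate
# limiter. The real logic lives in Synapse's Ratelimiter machinery; names and
# thresholds below are made up for the example.
import time
from collections import defaultdict, deque
from typing import Deque, Dict, Optional


class PerRoomJoinRateLimiter:
    """Tracks recent joins per room and throttles once a burst threshold is hit."""

    def __init__(self, max_joins: int = 10, window_seconds: float = 10.0) -> None:
        self._max_joins = max_joins
        self._window = window_seconds
        self._joins: Dict[str, Deque[float]] = defaultdict(deque)

    def record_join(self, room_id: str, now: Optional[float] = None) -> None:
        """Record a join seen locally or via replication (cross-worker)."""
        self._joins[room_id].append(time.time() if now is None else now)

    def should_throttle(self, room_id: str, now: Optional[float] = None) -> bool:
        """Return True if the room has exceeded the allowed join rate."""
        now = time.time() if now is None else now
        recent = self._joins[room_id]
        # Drop joins that have fallen outside the sliding window.
        while recent and now - recent[0] > self._window:
            recent.popleft()
        return len(recent) >= self._max_joins
```
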
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index f0614a28976d..ba2f7fa6d860 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -373,6 +373,7 @@ async def on_PUT( if ( self.hs.config.email.email_enable_notifs and self.hs.config.email.email_notif_for_new_users + and medium == "email" ): await self.pusher_pool.add_pusher( user_id=user_id, diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index bdc4a9c0683d..50edc6b7d35e 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -133,7 +133,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: assert self.hs.config.registration.account_threepid_delegate_email # Have the configured identity server handle the request - ret = await self.identity_handler.requestEmailToken( + ret = await self.identity_handler.request_email_token( self.hs.config.registration.account_threepid_delegate_email, email, client_secret, @@ -417,7 +417,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: assert self.hs.config.registration.account_threepid_delegate_email # Have the configured identity server handle the request - ret = await self.identity_handler.requestEmailToken( + ret = await self.identity_handler.request_email_token( self.hs.config.registration.account_threepid_delegate_email, email, client_secret, @@ -551,8 +551,7 @@ async def on_GET(self, request: Request) -> None: elif self.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE: raise SynapseError( 400, - "This homeserver is not validating threepids. Use an identity server " - "instead.", + "This homeserver is not validating threepids.", ) sid = parse_string(request, "sid", required=True) @@ -743,10 +742,12 @@ def __init__(self, hs: "HomeServer"): async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: body = parse_json_object_from_request(request) - assert_params_in_dict(body, ["id_server", "sid", "client_secret"]) + assert_params_in_dict( + body, ["id_server", "sid", "id_access_token", "client_secret"] + ) id_server = body["id_server"] sid = body["sid"] - id_access_token = body.get("id_access_token") # optional + id_access_token = body["id_access_token"] client_secret = body["client_secret"] assert_valid_client_secret(client_secret) diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index ce806e3c11be..e3f454896aa8 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -26,7 +26,7 @@ parse_string, ) from synapse.http.site import SynapseRequest -from synapse.logging.opentracing import log_kv, set_tag, trace +from synapse.logging.opentracing import log_kv, set_tag, trace_with_opname from synapse.types import JsonDict, StreamToken from ._base import client_patterns, interactive_auth_handler @@ -71,7 +71,7 @@ def __init__(self, hs: "HomeServer"): self.e2e_keys_handler = hs.get_e2e_keys_handler() self.device_handler = hs.get_device_handler() - @trace(opname="upload_keys") + @trace_with_opname("upload_keys") async def on_POST( self, request: SynapseRequest, device_id: Optional[str] ) -> Tuple[int, JsonDict]: @@ -208,7 +208,9 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: # We want to enforce they do pass us one, but we ignore it and return # changes after the "to" as well as before. - set_tag("to", parse_string(request, "to")) + # + # XXX This does not enforce that "to" is passed. 
+ set_tag("to", str(parse_string(request, "to"))) from_token = await StreamToken.from_string(self.store, from_token_string) diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index dd75e40f347c..0437c87d8d6d 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -28,7 +28,7 @@ from typing_extensions import TypedDict -from synapse.api.errors import Codes, LoginError, SynapseError +from synapse.api.errors import Codes, InvalidClientTokenError, LoginError, SynapseError from synapse.api.ratelimiting import Ratelimiter from synapse.api.urls import CLIENT_API_PREFIX from synapse.appservice import ApplicationService @@ -172,7 +172,13 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, LoginResponse]: try: if login_submission["type"] == LoginRestServlet.APPSERVICE_TYPE: - appservice = self.auth.get_appservice_by_req(request) + requester = await self.auth.get_user_by_req(request) + appservice = requester.app_service + + if appservice is None: + raise InvalidClientTokenError( + "This login method is only valid for application services" + ) if appservice.is_rate_limited(): await self._address_ratelimiter.ratelimit( diff --git a/synapse/rest/client/read_marker.py b/synapse/rest/client/read_marker.py index 3644705e6abb..8896f2df50c3 100644 --- a/synapse/rest/client/read_marker.py +++ b/synapse/rest/client/read_marker.py @@ -40,6 +40,10 @@ def __init__(self, hs: "HomeServer"): self.read_marker_handler = hs.get_read_marker_handler() self.presence_handler = hs.get_presence_handler() + self._known_receipt_types = {ReceiptTypes.READ, ReceiptTypes.FULLY_READ} + if hs.config.experimental.msc2285_enabled: + self._known_receipt_types.add(ReceiptTypes.READ_PRIVATE) + async def on_POST( self, request: SynapseRequest, room_id: str ) -> Tuple[int, JsonDict]: @@ -49,13 +53,7 @@ async def on_POST( body = parse_json_object_from_request(request) - valid_receipt_types = { - ReceiptTypes.READ, - ReceiptTypes.FULLY_READ, - ReceiptTypes.READ_PRIVATE, - } - - unrecognized_types = set(body.keys()) - valid_receipt_types + unrecognized_types = set(body.keys()) - self._known_receipt_types if unrecognized_types: # It's fine if there are unrecognized receipt types, but let's log # it to help debug clients that have typoed the receipt type. @@ -65,31 +63,25 @@ async def on_POST( # types. logger.info("Ignoring unrecognized receipt types: %s", unrecognized_types) - read_event_id = body.get(ReceiptTypes.READ, None) - if read_event_id: - await self.receipts_handler.received_client_receipt( - room_id, - ReceiptTypes.READ, - user_id=requester.user.to_string(), - event_id=read_event_id, - ) - - read_private_event_id = body.get(ReceiptTypes.READ_PRIVATE, None) - if read_private_event_id and self.config.experimental.msc2285_enabled: - await self.receipts_handler.received_client_receipt( - room_id, - ReceiptTypes.READ_PRIVATE, - user_id=requester.user.to_string(), - event_id=read_private_event_id, - ) - - read_marker_event_id = body.get(ReceiptTypes.FULLY_READ, None) - if read_marker_event_id: - await self.read_marker_handler.received_client_read_marker( - room_id, - user_id=requester.user.to_string(), - event_id=read_marker_event_id, - ) + for receipt_type in self._known_receipt_types: + event_id = body.get(receipt_type, None) + # TODO Add validation to reject non-string event IDs. 
+ if not event_id: + continue + + if receipt_type == ReceiptTypes.FULLY_READ: + await self.read_marker_handler.received_client_read_marker( + room_id, + user_id=requester.user.to_string(), + event_id=event_id, + ) + else: + await self.receipts_handler.received_client_receipt( + room_id, + receipt_type, + user_id=requester.user.to_string(), + event_id=event_id, + ) return 200, {} diff --git a/synapse/rest/client/receipts.py b/synapse/rest/client/receipts.py index 4b03eb876b75..409bfd43c11c 100644 --- a/synapse/rest/client/receipts.py +++ b/synapse/rest/client/receipts.py @@ -39,31 +39,27 @@ class ReceiptRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() - self.hs = hs self.auth = hs.get_auth() self.receipts_handler = hs.get_receipts_handler() self.read_marker_handler = hs.get_read_marker_handler() self.presence_handler = hs.get_presence_handler() + self._known_receipt_types = {ReceiptTypes.READ} + if hs.config.experimental.msc2285_enabled: + self._known_receipt_types.update( + (ReceiptTypes.READ_PRIVATE, ReceiptTypes.FULLY_READ) + ) + async def on_POST( self, request: SynapseRequest, room_id: str, receipt_type: str, event_id: str ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) - if self.hs.config.experimental.msc2285_enabled and receipt_type not in [ - ReceiptTypes.READ, - ReceiptTypes.READ_PRIVATE, - ReceiptTypes.FULLY_READ, - ]: + if receipt_type not in self._known_receipt_types: raise SynapseError( 400, - "Receipt type must be 'm.read', 'org.matrix.msc2285.read.private' or 'm.fully_read'", + f"Receipt type must be {', '.join(self._known_receipt_types)}", ) - elif ( - not self.hs.config.experimental.msc2285_enabled - and receipt_type != ReceiptTypes.READ - ): - raise SynapseError(400, "Receipt type must be 'm.read'") parse_json_object_from_request(request, allow_empty_body=False) diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index e8e51a9c66ad..b7ab090bbdcd 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -142,7 +142,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: assert self.hs.config.registration.account_threepid_delegate_email # Have the configured identity server handle the request - ret = await self.identity_handler.requestEmailToken( + ret = await self.identity_handler.request_email_token( self.hs.config.registration.account_threepid_delegate_email, email, client_secret, @@ -150,17 +150,17 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: next_link, ) else: - # Send registration emails from Synapse - sid = await self.identity_handler.send_threepid_validation( - email, - client_secret, - send_attempt, - self.mailer.send_registration_mail, - next_link, - ) - - # Wrap the session id in a JSON object - ret = {"sid": sid} + # Send registration emails from Synapse, + # wrapping the session id in a JSON object. + ret = { + "sid": await self.identity_handler.send_threepid_validation( + email, + client_secret, + send_attempt, + self.mailer.send_registration_mail, + next_link, + ) + } threepid_send_requests.labels(type="email", reason="register").observe( send_attempt diff --git a/synapse/rest/client/room_keys.py b/synapse/rest/client/room_keys.py index 37e39570f6ac..f7081f638e51 100644 --- a/synapse/rest/client/room_keys.py +++ b/synapse/rest/client/room_keys.py @@ -13,7 +13,7 @@ # limitations under the License. 
import logging -from typing import TYPE_CHECKING, Optional, Tuple +from typing import TYPE_CHECKING, Optional, Tuple, cast from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.server import HttpServer @@ -127,7 +127,7 @@ async def on_PUT( requester = await self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() body = parse_json_object_from_request(request) - version = parse_string(request, "version") + version = parse_string(request, "version", required=True) if session_id: body = {"sessions": {session_id: body}} @@ -196,8 +196,11 @@ async def on_GET( user_id = requester.user.to_string() version = parse_string(request, "version", required=True) - room_keys = await self.e2e_room_keys_handler.get_room_keys( - user_id, version, room_id, session_id + room_keys = cast( + JsonDict, + await self.e2e_room_keys_handler.get_room_keys( + user_id, version, room_id, session_id + ), ) # Convert room_keys to the right format to return. @@ -240,7 +243,7 @@ async def on_DELETE( requester = await self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() - version = parse_string(request, "version") + version = parse_string(request, "version", required=True) ret = await self.e2e_room_keys_handler.delete_room_keys( user_id, version, room_id, session_id diff --git a/synapse/rest/client/sendtodevice.py b/synapse/rest/client/sendtodevice.py index 3322c8ef48da..1a8e9a96d4c7 100644 --- a/synapse/rest/client/sendtodevice.py +++ b/synapse/rest/client/sendtodevice.py @@ -19,7 +19,7 @@ from synapse.http.server import HttpServer from synapse.http.servlet import assert_params_in_dict, parse_json_object_from_request from synapse.http.site import SynapseRequest -from synapse.logging.opentracing import set_tag, trace +from synapse.logging.opentracing import set_tag, trace_with_opname from synapse.rest.client.transactions import HttpTransactionCache from synapse.types import JsonDict @@ -43,7 +43,7 @@ def __init__(self, hs: "HomeServer"): self.txns = HttpTransactionCache(hs) self.device_message_handler = hs.get_device_message_handler() - @trace(opname="sendToDevice") + @trace_with_opname("sendToDevice") def on_PUT( self, request: SynapseRequest, message_type: str, txn_id: str ) -> Awaitable[Tuple[int, JsonDict]]: diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 8bbf35148d67..c2989765cee0 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -37,7 +37,7 @@ from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string from synapse.http.site import SynapseRequest -from synapse.logging.opentracing import trace +from synapse.logging.opentracing import trace_with_opname from synapse.types import JsonDict, StreamToken from synapse.util import json_decoder @@ -210,7 +210,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: logger.debug("Event formatting complete") return 200, response_content - @trace(opname="sync.encode_response") + @trace_with_opname("sync.encode_response") async def encode_response( self, time_now: int, @@ -315,7 +315,7 @@ def encode_presence(events: List[UserPresenceState], time_now: int) -> JsonDict: ] } - @trace(opname="sync.encode_joined") + @trace_with_opname("sync.encode_joined") async def encode_joined( self, rooms: List[JoinedSyncResult], @@ -340,7 +340,7 @@ async def encode_joined( return joined - @trace(opname="sync.encode_invited") + 
@trace_with_opname("sync.encode_invited") async def encode_invited( self, rooms: List[InvitedSyncResult], @@ -371,7 +371,7 @@ async def encode_invited( return invited - @trace(opname="sync.encode_knocked") + @trace_with_opname("sync.encode_knocked") async def encode_knocked( self, rooms: List[KnockedSyncResult], @@ -420,7 +420,7 @@ async def encode_knocked( return knocked - @trace(opname="sync.encode_archived") + @trace_with_opname("sync.encode_archived") async def encode_archived( self, rooms: List[ArchivedSyncResult], diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 54a849eac94d..b36c98a08ea3 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -109,10 +109,64 @@ class MediaInfo: class PreviewUrlResource(DirectServeJsonResource): """ - Generating URL previews is a complicated task which many potential pitfalls. - - See docs/development/url_previews.md for discussion of the design and - algorithm followed in this module. + The `GET /_matrix/media/r0/preview_url` endpoint provides a generic preview API + for URLs which outputs Open Graph (https://ogp.me/) responses (with some Matrix + specific additions). + + This does have trade-offs compared to other designs: + + * Pros: + * Simple and flexible; can be used by any clients at any point + * Cons: + * If each homeserver provides one of these independently, all the homeservers in a + room may needlessly DoS the target URI + * The URL metadata must be stored somewhere, rather than just using Matrix + itself to store the media. + * Matrix cannot be used to distribute the metadata between homeservers. + + When Synapse is asked to preview a URL it does the following: + + 1. Checks against a URL blacklist (defined as `url_preview_url_blacklist` in the + config). + 2. Checks the URL against an in-memory cache and returns the result if it exists. (This + is also used to de-duplicate processing of multiple in-flight requests at once.) + 3. Kicks off a background process to generate a preview: + 1. Checks URL and timestamp against the database cache and returns the result if it + has not expired and was successful (a 2xx return code). + 2. Checks if the URL matches an oEmbed (https://oembed.com/) pattern. If it + does, update the URL to download. + 3. Downloads the URL and stores it into a file via the media storage provider + and saves the local media metadata. + 4. If the media is an image: + 1. Generates thumbnails. + 2. Generates an Open Graph response based on image properties. + 5. If the media is HTML: + 1. Decodes the HTML via the stored file. + 2. Generates an Open Graph response from the HTML. + 3. If a JSON oEmbed URL was found in the HTML via autodiscovery: + 1. Downloads the URL and stores it into a file via the media storage provider + and saves the local media metadata. + 2. Convert the oEmbed response to an Open Graph response. + 3. Override any Open Graph data from the HTML with data from oEmbed. + 4. If an image exists in the Open Graph response: + 1. Downloads the URL and stores it into a file via the media storage + provider and saves the local media metadata. + 2. Generates thumbnails. + 3. Updates the Open Graph response based on image properties. + 6. If the media is JSON and an oEmbed URL was found: + 1. Convert the oEmbed response to an Open Graph response. + 2. If a thumbnail or image is in the oEmbed response: + 1. 
Downloads the URL and stores it into a file via the media storage + provider and saves the local media metadata. + 2. Generates thumbnails. + 3. Updates the Open Graph response based on image properties. + 7. Stores the result in the database cache. + 4. Returns the result. + + The in-memory cache expires after 1 hour. + + Expired entries in the database cache (and their associated media files) are + deleted every 10 seconds. The default expiration time is 1 hour from download. """ isLeaf = True diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py index 2295adfaa7d6..5f725c76009f 100644 --- a/synapse/rest/media/v1/thumbnail_resource.py +++ b/synapse/rest/media/v1/thumbnail_resource.py @@ -17,9 +17,11 @@ import logging from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple -from synapse.api.errors import SynapseError +from synapse.api.errors import Codes, SynapseError, cs_error +from synapse.config.repository import THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP from synapse.http.server import ( DirectServeJsonResource, + respond_with_json, set_corp_headers, set_cors_headers, ) @@ -309,6 +311,19 @@ async def _select_and_respond_with_thumbnail( url_cache: True if this is from a URL cache. server_name: The server name, if this is a remote thumbnail. """ + logger.debug( + "_select_and_respond_with_thumbnail: media_id=%s desired=%sx%s (%s) thumbnail_infos=%s", + media_id, + desired_width, + desired_height, + desired_method, + thumbnail_infos, + ) + + # If `dynamic_thumbnails` is enabled, we expect Synapse to go down a + # different code path to handle it. + assert not self.dynamic_thumbnails + if thumbnail_infos: file_info = self._select_thumbnail( desired_width, @@ -384,8 +399,29 @@ async def _select_and_respond_with_thumbnail( file_info.thumbnail.length, ) else: + # This might be because: + # 1. We can't create thumbnails for the given media (corrupted or + # unsupported file type), or + # 2. The thumbnailing process never ran or errored out initially + # when the media was first uploaded (these bugs should be + # reported and fixed). + # Note that we don't attempt to generate a thumbnail now because + # `dynamic_thumbnails` is disabled. logger.info("Failed to find any generated thumbnails") - respond_404(request) + + respond_with_json( + request, + 400, + cs_error( + "Cannot find any thumbnails for the requested media (%r). This might mean the media is not a supported_media_format=(%s) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)" + % ( + request.postpath, + ", ".join(THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP.keys()), + ), + code=Codes.UNKNOWN, + ), + send_cors=True, + ) def _select_thumbnail( self, diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 781d9f06dace..87ccd52f0ab7 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -14,7 +14,7 @@ # limitations under the License. 
import heapq import logging -from collections import defaultdict +from collections import ChainMap, defaultdict from typing import ( TYPE_CHECKING, Any, @@ -24,14 +24,12 @@ DefaultDict, Dict, FrozenSet, - Iterable, List, Mapping, Optional, Sequence, Set, Tuple, - Union, ) import attr @@ -47,6 +45,7 @@ from synapse.state import v1, v2 from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.roommember import ProfileInfo +from synapse.storage.state import StateFilter from synapse.types import StateMap from synapse.util.async_helpers import Linearizer from synapse.util.caches.expiringcache import ExpiringCache @@ -54,6 +53,7 @@ if TYPE_CHECKING: from synapse.server import HomeServer + from synapse.storage.controllers import StateStorageController from synapse.storage.databases.main import DataStore logger = logging.getLogger(__name__) @@ -83,17 +83,26 @@ def _gen_state_id() -> str: class _StateCacheEntry: - __slots__ = ["state", "state_group", "state_id", "prev_group", "delta_ids"] + __slots__ = ["_state", "state_group", "prev_group", "delta_ids"] def __init__( self, - state: StateMap[str], + state: Optional[StateMap[str]], state_group: Optional[int], prev_group: Optional[int] = None, delta_ids: Optional[StateMap[str]] = None, ): + if state is None and state_group is None and prev_group is None: + raise Exception("One of state, state_group or prev_group must be not None") + + if prev_group is not None and delta_ids is None: + raise Exception("If prev_group is set so must delta_ids") + # A map from (type, state_key) to event_id. - self.state = frozendict(state) + # + # This can be None if we have a `state_group` (as then we can fetch the + # state from the DB.) + self._state = frozendict(state) if state is not None else None # the ID of a state group if one and only one is involved. # otherwise, None otherwise? @@ -102,20 +111,60 @@ def __init__( self.prev_group = prev_group self.delta_ids = frozendict(delta_ids) if delta_ids is not None else None - # The `state_id` is a unique ID we generate that can be used as ID for - # this collection of state. Usually this would be the same as the - # state group, but on worker instances we can't generate a new state - # group each time we resolve state, so we generate a separate one that - # isn't persisted and is used solely for caches. - # `state_id` is either a state_group (and so an int) or a string. This - # ensures we don't accidentally persist a state_id as a stateg_group - if state_group: - self.state_id: Union[str, int] = state_group - else: - self.state_id = _gen_state_id() + async def get_state( + self, + state_storage: "StateStorageController", + state_filter: Optional["StateFilter"] = None, + ) -> StateMap[str]: + """Get the state map for this entry, either from the in-memory state or + looking up the state group in the DB. + """ + + if self._state is not None: + return self._state + + if self.state_group is not None: + return await state_storage.get_state_ids_for_group( + self.state_group, state_filter + ) + + assert self.prev_group is not None and self.delta_ids is not None + + prev_state = await state_storage.get_state_ids_for_group( + self.prev_group, state_filter + ) + + # ChainMap expects MutableMapping, but since we're using it immutably + # its safe to give it immutable maps. + return ChainMap(self.delta_ids, prev_state) # type: ignore[arg-type] + + def set_state_group(self, state_group: int) -> None: + """Update the state group assigned to this state (e.g. after we've + persisted it). 
+ + Note: this will cause the cache entry to drop any stored state. + """ + + self.state_group = state_group + + # We clear out the state as we know longer need to explicitly keep it in + # the `state_cache` (as the store state group cache will do that). + self._state = None def __len__(self) -> int: - return len(self.state) + # The len should be used to estimate how large this cache entry is, for + # cache eviction purposes. This is why it's fine to return 1 if we're + # not storing any state. + + length = 0 + + if self._state: + length += len(self._state) + + if self.delta_ids: + length += len(self.delta_ids) + + return length or 1 # Make sure its not 0. class StateHandler: @@ -137,23 +186,29 @@ def __init__(self, hs: "HomeServer"): ReplicationUpdateCurrentStateRestServlet.make_client(hs) ) - async def get_current_state_ids( + async def compute_state_after_events( self, room_id: str, - latest_event_ids: Collection[str], + event_ids: Collection[str], + state_filter: Optional[StateFilter] = None, ) -> StateMap[str]: - """Get the current state, or the state at a set of events, for a room + """Fetch the state after each of the given event IDs. Resolve them and return. + + This is typically used where `event_ids` is a collection of forward extremities + in a room, intended to become the `prev_events` of a new event E. If so, the + return value of this function represents the state before E. Args: - room_id: - latest_event_ids: The forward extremities to resolve. + room_id: the room_id containing the given events. + event_ids: the events whose state should be fetched and resolved. Returns: - the state dict, mapping from (event_type, state_key) -> event_id + the state dict (a mapping from (event_type, state_key) -> event_id) which + holds the resolution of the states after the given event IDs. """ - logger.debug("calling resolve_state_groups from get_current_state_ids") - ret = await self.resolve_state_groups_for_events(room_id, latest_event_ids) - return ret.state + logger.debug("calling resolve_state_groups from compute_state_after_events") + ret = await self.resolve_state_groups_for_events(room_id, event_ids) + return await ret.get_state(self._state_storage_controller, state_filter) async def get_current_users_in_room( self, room_id: str, latest_event_ids: List[str] @@ -177,7 +232,8 @@ async def get_current_users_in_room( logger.debug("calling resolve_state_groups from get_current_users_in_room") entry = await self.resolve_state_groups_for_events(room_id, latest_event_ids) - return await self.store.get_joined_users_from_state(room_id, entry) + state = await entry.get_state(self._state_storage_controller, StateFilter.all()) + return await self.store.get_joined_users_from_state(room_id, state, entry) async def get_hosts_in_room_at_events( self, room_id: str, event_ids: Collection[str] @@ -192,7 +248,8 @@ async def get_hosts_in_room_at_events( The hosts in the room at the given events """ entry = await self.resolve_state_groups_for_events(room_id, event_ids) - return await self.store.get_joined_hosts(room_id, entry) + state = await entry.get_state(self._state_storage_controller, StateFilter.all()) + return await self.store.get_joined_hosts(room_id, state, entry) async def compute_event_context( self, @@ -227,10 +284,19 @@ async def compute_event_context( # if state_ids_before_event: # if we're given the state before the event, then we use that - state_group_before_event = None state_group_before_event_prev_group = None deltas_to_state_group_before_event = None - entry = None + + # .. 
though we need to get a state group for it. + state_group_before_event = ( + await self._state_storage_controller.store_state_group( + event.event_id, + event.room_id, + prev_group=None, + delta_ids=None, + current_state_ids=state_ids_before_event, + ) + ) else: # otherwise, we'll need to resolve the state across the prev_events. @@ -264,36 +330,32 @@ async def compute_event_context( await_full_state=False, ) - state_ids_before_event = entry.state - state_group_before_event = entry.state_group state_group_before_event_prev_group = entry.prev_group deltas_to_state_group_before_event = entry.delta_ids + state_ids_before_event = None + + # We make sure that we have a state group assigned to the state. + if entry.state_group is None: + # store_state_group requires us to have either a previous state group + # (with deltas) or the complete state map. So, if we don't have a + # previous state group, load the complete state map now. + if state_group_before_event_prev_group is None: + state_ids_before_event = await entry.get_state( + self._state_storage_controller, StateFilter.all() + ) - # - # make sure that we have a state group at that point. If it's not a state event, - # that will be the state group for the new event. If it *is* a state event, - # it might get rejected (in which case we'll need to persist it with the - # previous state group) - # - - if not state_group_before_event: - state_group_before_event = ( - await self._state_storage_controller.store_state_group( - event.event_id, - event.room_id, - prev_group=state_group_before_event_prev_group, - delta_ids=deltas_to_state_group_before_event, - current_state_ids=state_ids_before_event, + state_group_before_event = ( + await self._state_storage_controller.store_state_group( + event.event_id, + event.room_id, + prev_group=state_group_before_event_prev_group, + delta_ids=deltas_to_state_group_before_event, + current_state_ids=state_ids_before_event, + ) ) - ) - - # Assign the new state group to the cached state entry. - # - # Note that this can race in that we could generate multiple state - # groups for the same state entry, but that is just inefficient - # rather than dangerous. 
- if entry and entry.state_group is None: - entry.state_group = state_group_before_event + entry.set_state_group(state_group_before_event) + else: + state_group_before_event = entry.state_group # # now if it's not a state event, we're done @@ -315,13 +377,18 @@ async def compute_event_context( # key = (event.type, event.state_key) - if key in state_ids_before_event: - replaces = state_ids_before_event[key] - if replaces != event.event_id: - event.unsigned["replaces_state"] = replaces - state_ids_after_event = dict(state_ids_before_event) - state_ids_after_event[key] = event.event_id + if state_ids_before_event is not None: + replaces = state_ids_before_event.get(key) + else: + replaces_state_map = await entry.get_state( + self._state_storage_controller, StateFilter.from_types([key]) + ) + replaces = replaces_state_map.get(key) + + if replaces and replaces != event.event_id: + event.unsigned["replaces_state"] = replaces + delta_ids = {key: event.event_id} state_group_after_event = ( @@ -330,7 +397,7 @@ async def compute_event_context( event.room_id, prev_group=state_group_before_event, delta_ids=delta_ids, - current_state_ids=state_ids_after_event, + current_state_ids=None, ) ) @@ -372,9 +439,6 @@ async def resolve_state_groups_for_events( state_group_ids_set = set(state_group_ids) if len(state_group_ids_set) == 1: (state_group_id,) = state_group_ids_set - state = await self._state_storage_controller.get_state_for_groups( - state_group_ids_set - ) ( prev_group, delta_ids, @@ -382,7 +446,7 @@ async def resolve_state_groups_for_events( state_group_id ) return _StateCacheEntry( - state=state[state_group_id], + state=None, state_group=state_group_id, prev_group=prev_group, delta_ids=delta_ids, @@ -405,31 +469,6 @@ async def resolve_state_groups_for_events( ) return result - async def resolve_events( - self, - room_version: str, - state_sets: Collection[Iterable[EventBase]], - event: EventBase, - ) -> StateMap[EventBase]: - logger.info( - "Resolving state for %s with %d groups", event.room_id, len(state_sets) - ) - state_set_ids = [ - {(ev.type, ev.state_key): ev.event_id for ev in st} for st in state_sets - ] - - state_map = {ev.event_id: ev for st in state_sets for ev in st} - - new_state = await self._state_resolution_handler.resolve_events_with_store( - event.room_id, - room_version, - state_set_ids, - event_map=state_map, - state_res_store=StateResolutionStore(self.store), - ) - - return {key: state_map[ev_id] for key, ev_id in new_state.items()} - async def update_current_state(self, room_id: str) -> None: """Recalculates the current state for a room, and persists it. @@ -741,7 +780,7 @@ def _make_state_cache_entry( old_state_event_ids = set(state.values()) if new_state_event_ids == old_state_event_ids: # got an exact match. - return _StateCacheEntry(state=new_state, state_group=sg) + return _StateCacheEntry(state=None, state_group=sg) # TODO: We want to create a state group for this set of events, to # increase cache hits, but we need to make sure that it doesn't @@ -752,14 +791,25 @@ def _make_state_cache_entry( delta_ids: Optional[StateMap[str]] = None for old_group, old_state in state_groups_ids.items(): + if old_state.keys() - new_state.keys(): + # Currently we don't support deltas that remove keys from the state + # map, so we have to ignore this group as a candidate to base the + # new group on. 
+ continue + n_delta_ids = {k: v for k, v in new_state.items() if old_state.get(k) != v} if not delta_ids or len(n_delta_ids) < len(delta_ids): prev_group = old_group delta_ids = n_delta_ids - return _StateCacheEntry( - state=new_state, state_group=None, prev_group=prev_group, delta_ids=delta_ids - ) + if prev_group is not None: + # If we have a prev group and deltas then we can drop the new state from + # the cache (to reduce memory usage). + return _StateCacheEntry( + state=None, state_group=None, prev_group=prev_group, delta_ids=delta_ids + ) + else: + return _StateCacheEntry(state=new_state, state_group=None) @attr.s(slots=True, auto_attribs=True) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index b8c8dcd76bfc..e30f9c76d461 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -80,6 +80,10 @@ def _invalidate_state_caches( ) self._attempt_to_invalidate_cache("get_local_users_in_room", (room_id,)) + # There's no easy way of invalidating this cache for just the users + # that have changed, so we just clear the entire thing. + self._attempt_to_invalidate_cache("does_pair_of_users_share_a_room", None) + for user_id in members_changed: self._attempt_to_invalidate_cache( "get_user_in_room_with_profile", (room_id, user_id) @@ -96,6 +100,10 @@ def _attempt_to_invalidate_cache( cache doesn't exist. Mainly used for invalidating caches on workers, where they may not have the cache. + Note that this function does not invalidate any remote caches, only the + local in-memory ones. Any remote invalidation must be performed before + calling this. + Args: cache_name key: Entry to invalidate. If None then invalidates the entire @@ -112,7 +120,10 @@ def _attempt_to_invalidate_cache( if key is None: cache.invalidate_all() else: - cache.invalidate(tuple(key)) + # Prefer any local-only invalidation method. Invalidating any non-local + # cache must be be done before this. 
+ invalidate_method = getattr(cache, "invalidate_local", cache.invalidate) + invalidate_method(tuple(key)) def db_to_json(db_content: Union[memoryview, bytes, bytearray, str]) -> Any: diff --git a/synapse/storage/controllers/__init__.py b/synapse/storage/controllers/__init__.py index 55649719f6f6..45101cda7adf 100644 --- a/synapse/storage/controllers/__init__.py +++ b/synapse/storage/controllers/__init__.py @@ -43,4 +43,6 @@ def __init__(self, hs: "HomeServer", stores: Databases): self.persistence = None if stores.persist_events: - self.persistence = EventsPersistenceStorageController(hs, stores) + self.persistence = EventsPersistenceStorageController( + hs, stores, self.state + ) diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py index ea499ce0f827..cf98b0ab48f8 100644 --- a/synapse/storage/controllers/persist_events.py +++ b/synapse/storage/controllers/persist_events.py @@ -48,9 +48,11 @@ from synapse.logging import opentracing from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.storage.controllers.state import StateStorageController from synapse.storage.databases import Databases from synapse.storage.databases.main.events import DeltaState from synapse.storage.databases.main.events_worker import EventRedactBehaviour +from synapse.storage.state import StateFilter from synapse.types import ( PersistedEventPosition, RoomStreamToken, @@ -308,7 +310,12 @@ class EventsPersistenceStorageController: current state and forward extremity changes. """ - def __init__(self, hs: "HomeServer", stores: Databases): + def __init__( + self, + hs: "HomeServer", + stores: Databases, + state_controller: StateStorageController, + ): # We ultimately want to split out the state store from the main store, # so we use separate variables here even though they point to the same # store for now. @@ -325,6 +332,7 @@ def __init__(self, hs: "HomeServer", stores: Databases): self._process_event_persist_queue_task ) self._state_resolution_handler = hs.get_state_resolution_handler() + self._state_controller = state_controller async def _process_event_persist_queue_task( self, @@ -504,7 +512,7 @@ async def _calculate_current_state(self, room_id: str) -> StateMap[str]: state_res_store=StateResolutionStore(self.main_store), ) - return res.state + return await res.get_state(self._state_controller, StateFilter.all()) async def _persist_event_batch( self, _room_id: str, task: _PersistEventsTask @@ -940,7 +948,8 @@ async def _get_new_state_after_events( events_context, ) - return res.state, None, new_latest_event_ids + full_state = await res.get_state(self._state_controller) + return full_state, None, new_latest_event_ids async def _prune_extremities( self, diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index d3a44bc876b9..e08f956e6ef7 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -346,7 +346,7 @@ async def store_state_group( room_id: str, prev_group: Optional[int], delta_ids: Optional[StateMap[str]], - current_state_ids: StateMap[str], + current_state_ids: Optional[StateMap[str]], ) -> int: """Store a new set of state, returning a newly assigned state group. 
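
The `_StateCacheEntry` changes above make the cached state map optional: when an entry only holds a `prev_group` plus `delta_ids`, `get_state` fetches the previous group's state and overlays the deltas with a `ChainMap` instead of materialising a full copy. Below is a minimal sketch of that overlay pattern, assuming a hypothetical `fetch_group_state` in place of `StateStorageController.get_state_ids_for_group`.

```python
# Simplified sketch of the delta-over-previous-group lookup used by
# _StateCacheEntry.get_state. `fetch_group_state` is a stand-in for the
# state-storage controller and returns canned data for the example.
from collections import ChainMap
from typing import Dict, Mapping, Optional, Tuple

StateKey = Tuple[str, str]          # (event_type, state_key)
StateMap = Mapping[StateKey, str]   # -> event_id


def fetch_group_state(state_group: int) -> Dict[StateKey, str]:
    """Pretend DB lookup of the full state map for a state group."""
    return {("m.room.create", ""): "$create", ("m.room.member", "@a:hs"): "$join_a"}


def state_for_entry(
    state: Optional[StateMap],
    state_group: Optional[int],
    prev_group: Optional[int],
    delta_ids: Optional[StateMap],
) -> StateMap:
    # An entry either holds the state in memory, points at a stored state
    # group, or holds deltas against a previous group (mirroring the three
    # branches in get_state).
    if state is not None:
        return state
    if state_group is not None:
        return fetch_group_state(state_group)
    assert prev_group is not None and delta_ids is not None
    prev_state = fetch_group_state(prev_group)
    # ChainMap resolves keys from the deltas first, falling back to the
    # previous group's state; nothing is copied.
    return ChainMap(dict(delta_ids), prev_state)


# Example: overlay a membership change on top of the previous group's state.
resolved = state_for_entry(
    state=None,
    state_group=None,
    prev_group=1,
    delta_ids={("m.room.member", "@a:hs"): "$leave_a"},
)
assert resolved[("m.room.member", "@a:hs")] == "$leave_a"
assert resolved[("m.room.create", "")] == "$create"
```
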
diff --git a/synapse/storage/database.py b/synapse/storage/database.py index e21ab0851530..b394a6658b09 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -23,6 +23,7 @@ from typing import ( TYPE_CHECKING, Any, + Awaitable, Callable, Collection, Dict, @@ -38,7 +39,7 @@ ) import attr -from prometheus_client import Histogram +from prometheus_client import Counter, Histogram from typing_extensions import Concatenate, Literal, ParamSpec from twisted.enterprise import adbapi @@ -75,7 +76,8 @@ sql_scheduling_timer = Histogram("synapse_storage_schedule_time", "sec") sql_query_timer = Histogram("synapse_storage_query_time", "sec", ["verb"]) -sql_txn_timer = Histogram("synapse_storage_transaction_time", "sec", ["desc"]) +sql_txn_count = Counter("synapse_storage_transaction_time_count", "sec", ["desc"]) +sql_txn_duration = Counter("synapse_storage_transaction_time_sum", "sec", ["desc"]) # Unique indexes which have been added in background updates. Maps from table name @@ -168,6 +170,7 @@ def cursor( *, txn_name: Optional[str] = None, after_callbacks: Optional[List["_CallbackListEntry"]] = None, + async_after_callbacks: Optional[List["_AsyncCallbackListEntry"]] = None, exception_callbacks: Optional[List["_CallbackListEntry"]] = None, ) -> "LoggingTransaction": if not txn_name: @@ -178,6 +181,7 @@ def cursor( name=txn_name, database_engine=self.engine, after_callbacks=after_callbacks, + async_after_callbacks=async_after_callbacks, exception_callbacks=exception_callbacks, ) @@ -209,6 +213,9 @@ def __getattr__(self, name: str) -> Any: # The type of entry which goes on our after_callbacks and exception_callbacks lists. _CallbackListEntry = Tuple[Callable[..., object], Tuple[object, ...], Dict[str, object]] +_AsyncCallbackListEntry = Tuple[ + Callable[..., Awaitable], Tuple[object, ...], Dict[str, object] +] P = ParamSpec("P") R = TypeVar("R") @@ -227,6 +234,10 @@ class LoggingTransaction: that have been added by `call_after` which should be run on successful completion of the transaction. None indicates that no callbacks should be allowed to be scheduled to run. + async_after_callbacks: A list that asynchronous callbacks will be appended + to by `async_call_after` which should run, before after_callbacks, on + successful completion of the transaction. None indicates that no + callbacks should be allowed to be scheduled to run. exception_callbacks: A list that callbacks will be appended to that have been added by `call_on_exception` which should be run if transaction ends with an error. 
None indicates that no callbacks @@ -238,6 +249,7 @@ class LoggingTransaction: "name", "database_engine", "after_callbacks", + "async_after_callbacks", "exception_callbacks", ] @@ -247,12 +259,14 @@ def __init__( name: str, database_engine: BaseDatabaseEngine, after_callbacks: Optional[List[_CallbackListEntry]] = None, + async_after_callbacks: Optional[List[_AsyncCallbackListEntry]] = None, exception_callbacks: Optional[List[_CallbackListEntry]] = None, ): self.txn = txn self.name = name self.database_engine = database_engine self.after_callbacks = after_callbacks + self.async_after_callbacks = async_after_callbacks self.exception_callbacks = exception_callbacks def call_after( @@ -277,6 +291,28 @@ def call_after( # type-ignore: need mypy containing https://github.com/python/mypy/pull/12668 self.after_callbacks.append((callback, args, kwargs)) # type: ignore[arg-type] + def async_call_after( + self, callback: Callable[P, Awaitable], *args: P.args, **kwargs: P.kwargs + ) -> None: + """Call the given asynchronous callback on the main twisted thread after + the transaction has finished (but before those added in `call_after`). + + Mostly used to invalidate remote caches after transactions. + + Note that transactions may be retried a few times if they encounter database + errors such as serialization failures. Callbacks given to `async_call_after` + will accumulate across transaction attempts and will _all_ be called once a + transaction attempt succeeds, regardless of whether previous transaction + attempts failed. Otherwise, if all transaction attempts fail, all + `call_on_exception` callbacks will be run instead. + """ + # if self.async_after_callbacks is None, that means that whatever constructed the + # LoggingTransaction isn't expecting there to be any callbacks; assert that + # is not the case. 
+ assert self.async_after_callbacks is not None + # type-ignore: need mypy containing https://github.com/python/mypy/pull/12668 + self.async_after_callbacks.append((callback, args, kwargs)) # type: ignore[arg-type] + def call_on_exception( self, callback: Callable[P, object], *args: P.args, **kwargs: P.kwargs ) -> None: @@ -574,6 +610,7 @@ def new_transaction( conn: LoggingDatabaseConnection, desc: str, after_callbacks: List[_CallbackListEntry], + async_after_callbacks: List[_AsyncCallbackListEntry], exception_callbacks: List[_CallbackListEntry], func: Callable[Concatenate[LoggingTransaction, P], R], *args: P.args, @@ -597,6 +634,7 @@ def new_transaction( conn desc after_callbacks + async_after_callbacks exception_callbacks func *args @@ -659,6 +697,7 @@ def new_transaction( cursor = conn.cursor( txn_name=name, after_callbacks=after_callbacks, + async_after_callbacks=async_after_callbacks, exception_callbacks=exception_callbacks, ) try: @@ -757,7 +796,8 @@ def new_transaction( self._current_txn_total_time += duration self._txn_perf_counters.update(desc, duration) - sql_txn_timer.labels(desc).observe(duration) + sql_txn_count.labels(desc).inc(1) + sql_txn_duration.labels(desc).inc(duration) async def runInteraction( self, @@ -798,6 +838,7 @@ async def runInteraction( async def _runInteraction() -> R: after_callbacks: List[_CallbackListEntry] = [] + async_after_callbacks: List[_AsyncCallbackListEntry] = [] exception_callbacks: List[_CallbackListEntry] = [] if not current_context(): @@ -809,6 +850,7 @@ async def _runInteraction() -> R: self.new_transaction, desc, after_callbacks, + async_after_callbacks, exception_callbacks, func, *args, @@ -817,13 +859,17 @@ async def _runInteraction() -> R: **kwargs, ) + # We order these assuming that async functions call out to external + # systems (e.g. to invalidate a cache) and the sync functions make these + # changes on any local in-memory caches/similar, and thus must be second. 
+ for async_callback, async_args, async_kwargs in async_after_callbacks: + await async_callback(*async_args, **async_kwargs) for after_callback, after_args, after_kwargs in after_callbacks: after_callback(*after_args, **after_kwargs) - return cast(R, result) except Exception: - for after_callback, after_args, after_kwargs in exception_callbacks: - after_callback(*after_args, **after_kwargs) + for exception_callback, after_args, after_kwargs in exception_callbacks: + exception_callback(*after_args, **after_kwargs) raise # To handle cancellation, we ensure that `after_callback`s and diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index a3d31d373724..4dccbb732a20 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -24,9 +24,9 @@ LoggingTransaction, ) from synapse.storage.databases.main.stats import UserSortOrder -from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine +from synapse.storage.engines import BaseDatabaseEngine from synapse.storage.types import Cursor -from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator +from synapse.storage.util.id_generators import StreamIdGenerator from synapse.types import JsonDict, get_domain_from_id from synapse.util.caches.stream_change_cache import StreamChangeCache @@ -149,31 +149,6 @@ def __init__( ], ) - self._cache_id_gen: Optional[MultiWriterIdGenerator] - if isinstance(self.database_engine, PostgresEngine): - # We set the `writers` to an empty list here as we don't care about - # missing updates over restarts, as we'll not have anything in our - # caches to invalidate. (This reduces the amount of writes to the DB - # that happen). - self._cache_id_gen = MultiWriterIdGenerator( - db_conn, - database, - stream_name="caches", - instance_name=hs.get_instance_name(), - tables=[ - ( - "cache_invalidation_stream_by_instance", - "instance_name", - "stream_id", - ) - ], - sequence_name="cache_invalidation_stream_seq", - writers=[], - ) - - else: - self._cache_id_gen = None - super().__init__(database, db_conn, hs) events_max = self._stream_id_gen.get_current_token() diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py index e284454b660f..64b70a7b28ee 100644 --- a/synapse/storage/databases/main/appservice.py +++ b/synapse/storage/databases/main/appservice.py @@ -371,52 +371,30 @@ def _get_oldest_unsent_txn( device_list_summary=DeviceListUpdates(), ) - async def set_appservice_last_pos(self, pos: int) -> None: - def set_appservice_last_pos_txn(txn: LoggingTransaction) -> None: - txn.execute( - "UPDATE appservice_stream_position SET stream_ordering = ?", (pos,) - ) + async def get_appservice_last_pos(self) -> int: + """ + Get the last stream ordering position for the appservice process. 
+ """ - await self.db_pool.runInteraction( - "set_appservice_last_pos", set_appservice_last_pos_txn + return await self.db_pool.simple_select_one_onecol( + table="appservice_stream_position", + retcol="stream_ordering", + keyvalues={}, + desc="get_appservice_last_pos", ) - async def get_new_events_for_appservice( - self, current_id: int, limit: int - ) -> Tuple[int, List[EventBase]]: - """Get all new events for an appservice""" - - def get_new_events_for_appservice_txn( - txn: LoggingTransaction, - ) -> Tuple[int, List[str]]: - sql = ( - "SELECT e.stream_ordering, e.event_id" - " FROM events AS e" - " WHERE" - " (SELECT stream_ordering FROM appservice_stream_position)" - " < e.stream_ordering" - " AND e.stream_ordering <= ?" - " ORDER BY e.stream_ordering ASC" - " LIMIT ?" - ) - - txn.execute(sql, (current_id, limit)) - rows = txn.fetchall() - - upper_bound = current_id - if len(rows) == limit: - upper_bound = rows[-1][0] - - return upper_bound, [row[1] for row in rows] + async def set_appservice_last_pos(self, pos: int) -> None: + """ + Set the last stream ordering position for the appservice process. + """ - upper_bound, event_ids = await self.db_pool.runInteraction( - "get_new_events_for_appservice", get_new_events_for_appservice_txn + await self.db_pool.simple_update_one( + table="appservice_stream_position", + keyvalues={}, + updatevalues={"stream_ordering": pos}, + desc="set_appservice_last_pos", ) - events = await self.get_events_as_list(event_ids, get_prev_content=True) - - return upper_bound, events - async def get_type_stream_id_for_appservice( self, service: ApplicationService, type: str ) -> int: diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index 1653a6a9b694..12e9a423826a 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -32,6 +32,7 @@ LoggingTransaction, ) from synapse.storage.engines import PostgresEngine +from synapse.storage.util.id_generators import MultiWriterIdGenerator from synapse.util.caches.descriptors import _CachedFunction from synapse.util.iterutils import batch_iter @@ -65,6 +66,31 @@ def __init__( psql_only=True, # The table is only on postgres DBs. ) + self._cache_id_gen: Optional[MultiWriterIdGenerator] + if isinstance(self.database_engine, PostgresEngine): + # We set the `writers` to an empty list here as we don't care about + # missing updates over restarts, as we'll not have anything in our + # caches to invalidate. (This reduces the amount of writes to the DB + # that happen). + self._cache_id_gen = MultiWriterIdGenerator( + db_conn, + database, + stream_name="caches", + instance_name=hs.get_instance_name(), + tables=[ + ( + "cache_invalidation_stream_by_instance", + "instance_name", + "stream_id", + ) + ], + sequence_name="cache_invalidation_stream_seq", + writers=[], + ) + + else: + self._cache_id_gen = None + async def get_all_updated_caches( self, instance_name: str, last_id: int, current_id: int, limit: int ) -> Tuple[List[Tuple[int, tuple]], int, bool]: @@ -193,7 +219,10 @@ def _invalidate_caches_for_event( relates_to: Optional[str], backfilled: bool, ) -> None: - self._invalidate_get_event_cache(event_id) + # This invalidates any local in-memory cached event objects, the original + # process triggering the invalidation is responsible for clearing any external + # cached objects. 
+ self._invalidate_local_get_event_cache(event_id) self.have_seen_event.invalidate((room_id, event_id)) self.get_latest_event_ids_in_room.invalidate((room_id,)) @@ -208,7 +237,7 @@ def _invalidate_caches_for_event( self._events_stream_cache.entity_has_changed(room_id, stream_ordering) if redacts: - self._invalidate_get_event_cache(redacts) + self._invalidate_local_get_event_cache(redacts) # Caches which might leak edits must be invalidated for the event being # redacted. self.get_relations_for_event.invalidate((redacts,)) diff --git a/synapse/storage/databases/main/censor_events.py b/synapse/storage/databases/main/censor_events.py index fd3fc298b37a..58177ecec132 100644 --- a/synapse/storage/databases/main/censor_events.py +++ b/synapse/storage/databases/main/censor_events.py @@ -194,7 +194,7 @@ def delete_expired_event_txn(txn: LoggingTransaction) -> None: # changed its content in the database. We can't call # self._invalidate_cache_and_stream because self.get_event_cache isn't of the # right type. - txn.call_after(self._get_event_cache.invalidate, (event.event_id,)) + self.invalidate_get_event_cache_after_txn(txn, event.event_id) # Send that invalidation to replication so that other workers also invalidate # the event cache. self._send_invalidation_to_replication( diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 422e0e65ca50..73c95ffb6f28 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -436,7 +436,7 @@ async def delete_messages_for_device( (user_id, device_id), None ) - set_tag("last_deleted_stream_id", last_deleted_stream_id) + set_tag("last_deleted_stream_id", str(last_deleted_stream_id)) if last_deleted_stream_id: has_changed = self._device_inbox_stream_cache.has_entity_changed( diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index adde5d097810..ca0fe8c4bea3 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -669,7 +669,7 @@ def get_device_stream_token(self) -> int: @trace async def get_user_devices_from_cache( - self, query_list: List[Tuple[str, str]] + self, query_list: List[Tuple[str, Optional[str]]] ) -> Tuple[Set[str], Dict[str, Dict[str, JsonDict]]]: """Get the devices (and keys if any) for remote users from the cache. 
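The hunks above route event-cache invalidation through transaction callbacks and split it into an asynchronous step (for any external cache) followed by a synchronous step for the local in-memory cache. As a rough illustration of that ordering, here is a self-contained sketch under stated assumptions: `FakeTransaction`, `EventCaches`, and the demo names are invented stand-ins rather than Synapse APIs; only the `call_after`/`async_call_after` hook names and the async-then-local ordering are taken from the diff.

import asyncio
from typing import Any, Awaitable, Callable, Dict, List, Tuple


class FakeTransaction:
    """Collects callbacks to run once the transaction completes successfully."""

    def __init__(self) -> None:
        self.after_callbacks: List[Callable[[], None]] = []
        self.async_after_callbacks: List[Callable[[], Awaitable[None]]] = []

    def call_after(self, fn: Callable[..., None], *args: Any) -> None:
        self.after_callbacks.append(lambda: fn(*args))

    def async_call_after(self, fn: Callable[..., Awaitable[None]], *args: Any) -> None:
        self.async_after_callbacks.append(lambda: fn(*args))

    async def commit(self) -> None:
        # On success: async callbacks run first, then the synchronous ones.
        for async_cb in self.async_after_callbacks:
            await async_cb()
        for cb in self.after_callbacks:
            cb()


class EventCaches:
    """Illustrative stand-ins for an external cache plus a local in-memory one."""

    def __init__(self) -> None:
        self.external: Dict[Tuple[str], str] = {}
        self.local: Dict[Tuple[str], str] = {}

    async def invalidate_external(self, event_id: str) -> None:
        self.external.pop((event_id,), None)

    def invalidate_local(self, event_id: str) -> None:
        self.local.pop((event_id,), None)

    def invalidate_after_txn(self, txn: FakeTransaction, event_id: str) -> None:
        # Same ordering as the diff: async (possibly remote) first, local second.
        txn.async_call_after(self.invalidate_external, event_id)
        txn.call_after(self.invalidate_local, event_id)


async def _demo() -> None:
    caches = EventCaches()
    caches.external[("$event1",)] = caches.local[("$event1",)] = "cached"
    txn = FakeTransaction()
    caches.invalidate_after_txn(txn, "$event1")
    await txn.commit()
    assert ("$event1",) not in caches.external
    assert ("$event1",) not in caches.local


if __name__ == "__main__":
    asyncio.run(_demo())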
@@ -706,8 +706,8 @@ async def get_user_devices_from_cache( else: results[user_id] = await self.get_cached_devices_for_user(user_id) - set_tag("in_cache", results) - set_tag("not_in_cache", user_ids_not_in_cache) + set_tag("in_cache", str(results)) + set_tag("not_in_cache", str(user_ids_not_in_cache)) return user_ids_not_in_cache, results diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 9b293475c871..46c0d061579e 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -22,11 +22,14 @@ List, Optional, Tuple, + Union, cast, + overload, ) import attr from canonicaljson import encode_canonical_json +from typing_extensions import Literal from synapse.api.constants import DeviceKeyAlgorithms from synapse.appservice import ( @@ -113,7 +116,7 @@ async def get_e2e_device_keys_for_federation_query( user_devices = devices[user_id] results = [] for device_id, device in user_devices.items(): - result = {"device_id": device_id} + result: JsonDict = {"device_id": device_id} keys = device.keys if keys: @@ -143,7 +146,7 @@ async def get_e2e_device_keys_for_cs_api( key data. The key data will be a dict in the same format as the DeviceKeys type returned by POST /_matrix/client/r0/keys/query. """ - set_tag("query_list", query_list) + set_tag("query_list", str(query_list)) if not query_list: return {} @@ -156,6 +159,9 @@ async def get_e2e_device_keys_for_cs_api( rv[user_id] = {} for device_id, device_info in device_keys.items(): r = device_info.keys + if r is None: + continue + r["unsigned"] = {} display_name = device_info.display_name if display_name is not None: @@ -164,13 +170,42 @@ async def get_e2e_device_keys_for_cs_api( return rv + @overload + async def get_e2e_device_keys_and_signatures( + self, + query_list: Collection[Tuple[str, Optional[str]]], + include_all_devices: Literal[False] = False, + ) -> Dict[str, Dict[str, DeviceKeyLookupResult]]: + ... + + @overload + async def get_e2e_device_keys_and_signatures( + self, + query_list: Collection[Tuple[str, Optional[str]]], + include_all_devices: bool = False, + include_deleted_devices: Literal[False] = False, + ) -> Dict[str, Dict[str, DeviceKeyLookupResult]]: + ... + + @overload + async def get_e2e_device_keys_and_signatures( + self, + query_list: Collection[Tuple[str, Optional[str]]], + include_all_devices: Literal[True], + include_deleted_devices: Literal[True], + ) -> Dict[str, Dict[str, Optional[DeviceKeyLookupResult]]]: + ... + @trace async def get_e2e_device_keys_and_signatures( self, - query_list: List[Tuple[str, Optional[str]]], + query_list: Collection[Tuple[str, Optional[str]]], include_all_devices: bool = False, include_deleted_devices: bool = False, - ) -> Dict[str, Dict[str, Optional[DeviceKeyLookupResult]]]: + ) -> Union[ + Dict[str, Dict[str, DeviceKeyLookupResult]], + Dict[str, Dict[str, Optional[DeviceKeyLookupResult]]], + ]: """Fetch a list of device keys Any cross-signatures made on the keys by the owner of the device are also @@ -383,7 +418,7 @@ async def add_e2e_one_time_keys( def _add_e2e_one_time_keys(txn: LoggingTransaction) -> None: set_tag("user_id", user_id) set_tag("device_id", device_id) - set_tag("new_keys", new_keys) + set_tag("new_keys", str(new_keys)) # We are protected from race between lookup and insertion due to # a unique constraint. 
If there is a race of two calls to # `add_e2e_one_time_keys` then they'll conflict and we will only @@ -1044,7 +1079,7 @@ def _claim_e2e_one_time_key_returning( _claim_e2e_one_time_key = _claim_e2e_one_time_key_simple db_autocommit = False - row = await self.db_pool.runInteraction( + claim_row = await self.db_pool.runInteraction( "claim_e2e_one_time_keys", _claim_e2e_one_time_key, user_id, @@ -1052,11 +1087,11 @@ def _claim_e2e_one_time_key_returning( algorithm, db_autocommit=db_autocommit, ) - if row: + if claim_row: device_results = results.setdefault(user_id, {}).setdefault( device_id, {} ) - device_results[row[0]] = row[1] + device_results[claim_row[0]] = claim_row[1] continue # No one-time key available, so see if there's a fallback @@ -1126,7 +1161,7 @@ def _set_e2e_device_keys_txn(txn: LoggingTransaction) -> bool: set_tag("user_id", user_id) set_tag("device_id", device_id) set_tag("time_now", time_now) - set_tag("device_keys", device_keys) + set_tag("device_keys", str(device_keys)) old_key_json = self.db_pool.simple_select_one_onecol_txn( txn, diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index eb4efbb93cec..1f600f119029 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1293,7 +1293,7 @@ def _update_room_depths_txn( depth_updates: Dict[str, int] = {} for event, context in events_and_contexts: # Remove the any existing cache entries for the event_ids - txn.call_after(self.store._invalidate_get_event_cache, event.event_id) + self.store.invalidate_get_event_cache_after_txn(txn, event.event_id) # Then update the `stream_ordering` position to mark the latest # event as the front of the room. This should not be done for # backfilled events because backfilled events have negative @@ -1346,9 +1346,24 @@ def _update_outliers_txn( event_id: outlier for event_id, outlier in txn } + logger.debug( + "_update_outliers_txn: events=%s have_persisted=%s", + [ev.event_id for ev, _ in events_and_contexts], + have_persisted, + ) + to_remove = set() for event, context in events_and_contexts: - if event.event_id not in have_persisted: + outlier_persisted = have_persisted.get(event.event_id) + logger.debug( + "_update_outliers_txn: event=%s outlier=%s outlier_persisted=%s", + event.event_id, + event.internal_metadata.is_outlier(), + outlier_persisted, + ) + + # Ignore events which we haven't persisted at all + if outlier_persisted is None: continue to_remove.add(event) @@ -1358,7 +1373,6 @@ def _update_outliers_txn( # was an outlier or not - what we have is at least as good. continue - outlier_persisted = have_persisted[event.event_id] if not event.internal_metadata.is_outlier() and outlier_persisted: # We received a copy of an event that we had already stored as # an outlier in the database. We now have some state at that event @@ -1369,7 +1383,10 @@ def _update_outliers_txn( # events down /sync. In general they will be historical events, so that # doesn't matter too much, but that is not always the case. - logger.info("Updating state for ex-outlier event %s", event.event_id) + logger.info( + "_update_outliers_txn: Updating state for ex-outlier event %s", + event.event_id, + ) # insert into event_to_state_groups. 
try: @@ -1669,13 +1686,13 @@ def _add_to_cache( if not row["rejects"] and not row["redacts"]: to_prefill.append(EventCacheEntry(event=event, redacted_event=None)) - def prefill() -> None: + async def prefill() -> None: for cache_entry in to_prefill: - self.store._get_event_cache.set( + await self.store._get_event_cache.set( (cache_entry.event.event_id,), cache_entry ) - txn.call_after(prefill) + txn.async_call_after(prefill) def _store_redaction(self, txn: LoggingTransaction, event: EventBase) -> None: """Invalidate the caches for the redacted event. @@ -1684,7 +1701,7 @@ def _store_redaction(self, txn: LoggingTransaction, event: EventBase) -> None: _invalidate_caches_for_event. """ assert event.redacts is not None - txn.call_after(self.store._invalidate_get_event_cache, event.redacts) + self.store.invalidate_get_event_cache_after_txn(txn, event.redacts) txn.call_after(self.store.get_relations_for_event.invalidate, (event.redacts,)) txn.call_after(self.store.get_applicable_edit.invalidate, (event.redacts,)) diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index eeca85fc9457..6e8aeed7b4b0 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -67,6 +67,8 @@ class _BackgroundUpdates: EVENT_EDGES_DROP_INVALID_ROWS = "event_edges_drop_invalid_rows" EVENT_EDGES_REPLACE_INDEX = "event_edges_replace_index" + EVENTS_POPULATE_STATE_KEY_REJECTIONS = "events_populate_state_key_rejections" + @attr.s(slots=True, frozen=True, auto_attribs=True) class _CalculateChainCover: @@ -253,6 +255,11 @@ def __init__( replaces_index="ev_edges_id", ) + self.db_pool.updates.register_background_update_handler( + _BackgroundUpdates.EVENTS_POPULATE_STATE_KEY_REJECTIONS, + self._background_events_populate_state_key_rejections, + ) + async def _background_reindex_fields_sender( self, progress: JsonDict, batch_size: int ) -> int: @@ -1399,3 +1406,83 @@ def drop_invalid_event_edges_txn(txn: LoggingTransaction) -> bool: ) return batch_size + + async def _background_events_populate_state_key_rejections( + self, progress: JsonDict, batch_size: int + ) -> int: + """Back-populate `events.state_key` and `events.rejection_reason""" + + min_stream_ordering_exclusive = progress["min_stream_ordering_exclusive"] + max_stream_ordering_inclusive = progress["max_stream_ordering_inclusive"] + + def _populate_txn(txn: LoggingTransaction) -> bool: + """Returns True if we're done.""" + + # first we need to find an endpoint. + # we need to find the final row in the batch of batch_size, which means + # we need to skip over (batch_size-1) rows and get the next row. + txn.execute( + """ + SELECT stream_ordering FROM events + WHERE stream_ordering > ? AND stream_ordering <= ? + ORDER BY stream_ordering + LIMIT 1 OFFSET ? + """, + ( + min_stream_ordering_exclusive, + max_stream_ordering_inclusive, + batch_size - 1, + ), + ) + + endpoint = None + row = txn.fetchone() + if row: + endpoint = row[0] + + where_clause = "stream_ordering > ?" + args = [min_stream_ordering_exclusive] + if endpoint: + where_clause += " AND stream_ordering <= ?" + args.append(endpoint) + + # now do the updates. 
+ txn.execute( + f""" + UPDATE events + SET state_key = (SELECT state_key FROM state_events se WHERE se.event_id = events.event_id), + rejection_reason = (SELECT reason FROM rejections rej WHERE rej.event_id = events.event_id) + WHERE ({where_clause}) + """, + args, + ) + + logger.info( + "populated new `events` columns up to %s/%i: updated %i rows", + endpoint, + max_stream_ordering_inclusive, + txn.rowcount, + ) + + if endpoint is None: + # we're done + return True + + progress["min_stream_ordering_exclusive"] = endpoint + self.db_pool.updates._background_update_progress_txn( + txn, + _BackgroundUpdates.EVENTS_POPULATE_STATE_KEY_REJECTIONS, + progress, + ) + return False + + done = await self.db_pool.runInteraction( + desc="events_populate_state_key_rejections", func=_populate_txn + ) + + if done: + await self.db_pool.updates._end_background_update( + _BackgroundUpdates.EVENTS_POPULATE_STATE_KEY_REJECTIONS + ) + + return batch_size diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index b99b10778490..5914a35420e9 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -79,7 +79,7 @@ from synapse.util import unwrapFirstError from synapse.util.async_helpers import ObservableDeferred, delay_cancellation from synapse.util.caches.descriptors import cached, cachedList -from synapse.util.caches.lrucache import LruCache +from synapse.util.caches.lrucache import AsyncLruCache from synapse.util.iterutils import batch_iter from synapse.util.metrics import Measure @@ -238,7 +238,9 @@ def __init__( 5 * 60 * 1000, ) - self._get_event_cache: LruCache[Tuple[str], EventCacheEntry] = LruCache( + self._get_event_cache: AsyncLruCache[ + Tuple[str], EventCacheEntry + ] = AsyncLruCache( cache_name="*getEvent*", max_size=hs.config.caches.event_cache_size, ) @@ -292,25 +294,6 @@ def process_replication_rows( super().process_replication_rows(stream_name, instance_name, token, rows) - async def get_received_ts(self, event_id: str) -> Optional[int]: - """Get received_ts (when it was persisted) for the event. - - Raises an exception for unknown events. - - Args: - event_id: The event ID to query. - - Returns: - Timestamp in milliseconds, or None for events that were persisted - before received_ts was implemented. - """ - return await self.db_pool.simple_select_one_onecol( - table="events", - keyvalues={"event_id": event_id}, - retcol="received_ts", - desc="get_received_ts", - ) - async def have_censored_event(self, event_id: str) -> bool: """Check if an event has been censored, i.e. if the content of the event has been erased from the database due to a redaction. @@ -617,7 +600,7 @@ async def _get_events_from_cache_or_db( Returns: map from event id to result """ - event_entry_map = self._get_events_from_cache( + event_entry_map = await self._get_events_from_cache( event_ids, ) @@ -729,12 +712,46 @@ async def get_missing_events_from_db() -> Dict[str, EventCacheEntry]: return event_entry_map - def _invalidate_get_event_cache(self, event_id: str) -> None: - self._get_event_cache.invalidate((event_id,)) + def invalidate_get_event_cache_after_txn( + self, txn: LoggingTransaction, event_id: str + ) -> None: + """ + Prepares a database transaction to invalidate the get event cache for a given + event ID when executed successfully. 
This is achieved by attaching two callbacks + to the transaction, one to invalidate the async cache and one for the in memory + sync cache (importantly called in that order). + + Arguments: + txn: the database transaction to attach the callbacks to + event_id: the event ID to be invalidated from caches + """ + + txn.async_call_after(self._invalidate_async_get_event_cache, event_id) + txn.call_after(self._invalidate_local_get_event_cache, event_id) + + async def _invalidate_async_get_event_cache(self, event_id: str) -> None: + """ + Invalidates an event in the asyncronous get event cache, which may be remote. + + Arguments: + event_id: the event ID to invalidate + """ + + await self._get_event_cache.invalidate((event_id,)) + + def _invalidate_local_get_event_cache(self, event_id: str) -> None: + """ + Invalidates an event in local in-memory get event caches. + + Arguments: + event_id: the event ID to invalidate + """ + + self._get_event_cache.invalidate_local((event_id,)) self._event_ref.pop(event_id, None) self._current_event_fetches.pop(event_id, None) - def _get_events_from_cache( + async def _get_events_from_cache( self, events: Iterable[str], update_metrics: bool = True ) -> Dict[str, EventCacheEntry]: """Fetch events from the caches. @@ -749,7 +766,7 @@ def _get_events_from_cache( for event_id in events: # First check if it's in the event cache - ret = self._get_event_cache.get( + ret = await self._get_event_cache.get( (event_id,), None, update_metrics=update_metrics ) if ret: @@ -771,7 +788,7 @@ def _get_events_from_cache( # We add the entry back into the cache as we want to keep # recently queried events in the cache. - self._get_event_cache.set((event_id,), cache_entry) + await self._get_event_cache.set((event_id,), cache_entry) return event_map @@ -965,7 +982,13 @@ def _fetch_event_list( } row_dict = self.db_pool.new_transaction( - conn, "do_fetch", [], [], self._fetch_event_rows, events_to_fetch + conn, + "do_fetch", + [], + [], + [], + self._fetch_event_rows, + events_to_fetch, ) # We only want to resolve deferreds from the main thread @@ -1148,7 +1171,7 @@ async def _get_events_from_db( event=original_ev, redacted_event=redacted_event ) - self._get_event_cache.set((event_id,), cache_entry) + await self._get_event_cache.set((event_id,), cache_entry) result_map[event_id] = cache_entry if not redacted_event: @@ -1382,7 +1405,9 @@ async def _have_seen_events_dict( # if the event cache contains the event, obviously we've seen it. 
cache_results = { - (rid, eid) for (rid, eid) in keys if self._get_event_cache.contains((eid,)) + (rid, eid) + for (rid, eid) in keys + if await self._get_event_cache.contains((eid,)) } results = dict.fromkeys(cache_results, True) remaining = [k for k in keys if k not in cache_results] @@ -1465,7 +1490,7 @@ async def get_room_complexity(self, room_id: str) -> Dict[str, float]: async def get_all_new_forward_event_rows( self, instance_name: str, last_id: int, current_id: int, limit: int - ) -> List[Tuple[int, str, str, str, str, str, str, str, str]]: + ) -> List[Tuple[int, str, str, str, str, str, str, str, bool, bool]]: """Returns new events, for the Events replication stream Args: @@ -1481,10 +1506,11 @@ async def get_all_new_forward_event_rows( def get_all_new_forward_event_rows( txn: LoggingTransaction, - ) -> List[Tuple[int, str, str, str, str, str, str, str, str]]: + ) -> List[Tuple[int, str, str, str, str, str, str, str, bool, bool]]: sql = ( "SELECT e.stream_ordering, e.event_id, e.room_id, e.type," - " se.state_key, redacts, relates_to_id, membership, rejections.reason IS NOT NULL" + " se.state_key, redacts, relates_to_id, membership, rejections.reason IS NOT NULL," + " e.outlier" " FROM events AS e" " LEFT JOIN redactions USING (event_id)" " LEFT JOIN state_events AS se USING (event_id)" @@ -1498,7 +1524,8 @@ def get_all_new_forward_event_rows( ) txn.execute(sql, (last_id, current_id, instance_name, limit)) return cast( - List[Tuple[int, str, str, str, str, str, str, str, str]], txn.fetchall() + List[Tuple[int, str, str, str, str, str, str, str, bool, bool]], + txn.fetchall(), ) return await self.db_pool.runInteraction( @@ -1507,7 +1534,7 @@ def get_all_new_forward_event_rows( async def get_ex_outlier_stream_rows( self, instance_name: str, last_id: int, current_id: int - ) -> List[Tuple[int, str, str, str, str, str, str, str, str]]: + ) -> List[Tuple[int, str, str, str, str, str, str, str, bool, bool]]: """Returns de-outliered events, for the Events replication stream Args: @@ -1522,11 +1549,14 @@ async def get_ex_outlier_stream_rows( def get_ex_outlier_stream_rows_txn( txn: LoggingTransaction, - ) -> List[Tuple[int, str, str, str, str, str, str, str, str]]: + ) -> List[Tuple[int, str, str, str, str, str, str, str, bool, bool]]: sql = ( "SELECT event_stream_ordering, e.event_id, e.room_id, e.type," - " se.state_key, redacts, relates_to_id, membership, rejections.reason IS NOT NULL" + " se.state_key, redacts, relates_to_id, membership, rejections.reason IS NOT NULL," + " e.outlier" " FROM events AS e" + # NB: the next line (inner join) is what makes this query different from + # get_all_new_forward_event_rows. 
" INNER JOIN ex_outlier_stream AS out USING (event_id)" " LEFT JOIN redactions USING (event_id)" " LEFT JOIN state_events AS se USING (event_id)" @@ -1541,7 +1571,8 @@ def get_ex_outlier_stream_rows_txn( txn.execute(sql, (last_id, current_id, instance_name)) return cast( - List[Tuple[int, str, str, str, str, str, str, str, str]], txn.fetchall() + List[Tuple[int, str, str, str, str, str, str, str, bool, bool]], + txn.fetchall(), ) return await self.db_pool.runInteraction( diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index 9a63f953fb5e..efd136a86474 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -66,6 +66,7 @@ def __init__( "initialise_mau_threepids", [], [], + [], self._initialise_reserved_users, hs.config.server.mau_limits_reserved_threepids[: self._max_mau_value], ) diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index 87b0d090391a..f6822707e43c 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -19,6 +19,8 @@ from synapse.storage.database import LoggingTransaction from synapse.storage.databases.main import CacheInvalidationWorkerStore from synapse.storage.databases.main.state import StateGroupWorkerStore +from synapse.storage.engines import PostgresEngine +from synapse.storage.engines._base import IsolationLevel from synapse.types import RoomStreamToken logger = logging.getLogger(__name__) @@ -302,7 +304,7 @@ def _purge_history_txn( self._invalidate_cache_and_stream( txn, self.have_seen_event, (room_id, event_id) ) - self._invalidate_get_event_cache(event_id) + self.invalidate_get_event_cache_after_txn(txn, event_id) logger.info("[purge] done") @@ -317,11 +319,38 @@ async def purge_room(self, room_id: str) -> List[int]: Returns: The list of state groups to delete. """ - return await self.db_pool.runInteraction( - "purge_room", self._purge_room_txn, room_id + + # This first runs the purge transaction with READ_COMMITTED isolation level, + # meaning any new rows in the tables will not trigger a serialization error. + # We then run the same purge a second time without this isolation level to + # purge any of those rows which were added during the first. + + state_groups_to_delete = await self.db_pool.runInteraction( + "purge_room", + self._purge_room_txn, + room_id=room_id, + isolation_level=IsolationLevel.READ_COMMITTED, + ) + + state_groups_to_delete.extend( + await self.db_pool.runInteraction( + "purge_room", + self._purge_room_txn, + room_id=room_id, + ), ) + return state_groups_to_delete + def _purge_room_txn(self, txn: LoggingTransaction, room_id: str) -> List[int]: + # This collides with event persistence so we cannot write new events and metadata into + # a room while deleting it or this transaction will fail. + if isinstance(self.database_engine, PostgresEngine): + txn.execute( + "SELECT room_version FROM rooms WHERE room_id = ? FOR UPDATE", + (room_id,), + ) + # First, fetch all the state groups that should be deleted, before # we delete that information. 
txn.execute( diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index 86649c1e6cd6..768f95d16cf0 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -228,6 +228,7 @@ async def bulk_get_push_rules( iterable=user_ids, retcols=("*",), desc="bulk_get_push_rules", + batch_size=1000, ) rows.sort(key=lambda row: (-int(row["priority_class"]), -int(row["priority"]))) diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 13d6a1d5c0d9..d6d485507b17 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -175,7 +175,7 @@ def get_room_with_stats_txn( rooms.creator, state.encryption, state.is_federatable AS federatable, rooms.is_public AS public, state.join_rules, state.guest_access, state.history_visibility, curr.current_state_events AS state_events, - state.avatar, state.topic + state.avatar, state.topic, state.room_type FROM rooms LEFT JOIN room_stats_state state USING (room_id) LEFT JOIN room_stats_current curr USING (room_id) @@ -596,7 +596,8 @@ async def get_rooms_paginate( SELECT state.room_id, state.name, state.canonical_alias, curr.joined_members, curr.local_users_in_room, rooms.room_version, rooms.creator, state.encryption, state.is_federatable, rooms.is_public, state.join_rules, - state.guest_access, state.history_visibility, curr.current_state_events + state.guest_access, state.history_visibility, curr.current_state_events, + state.room_type FROM room_stats_state state INNER JOIN room_stats_current curr USING (room_id) INNER JOIN rooms USING (room_id) @@ -646,6 +647,7 @@ def _get_rooms_paginate_txn( "guest_access": room[11], "history_visibility": room[12], "state_events": room[13], + "room_type": room[14], } ) diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 0b5e4e425412..e2cccc688c9f 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -21,6 +21,7 @@ FrozenSet, Iterable, List, + Mapping, Optional, Set, Tuple, @@ -31,7 +32,6 @@ from synapse.api.constants import EventTypes, Membership from synapse.events import EventBase -from synapse.events.snapshot import EventContext from synapse.metrics import LaterGauge from synapse.metrics.background_process_metrics import ( run_as_background_process, @@ -56,6 +56,7 @@ from synapse.util.async_helpers import Linearizer from synapse.util.caches import intern_string from synapse.util.caches.descriptors import _CacheContext, cached, cachedList +from synapse.util.iterutils import batch_iter from synapse.util.metrics import Measure if TYPE_CHECKING: @@ -184,7 +185,7 @@ def _check_safe_current_state_events_membership_updated_txn( self._check_safe_current_state_events_membership_updated_txn, ) - @cached(max_entries=100000, iterable=True, prune_unread_entries=False) + @cached(max_entries=100000, iterable=True) async def get_users_in_room(self, room_id: str) -> List[str]: return await self.db_pool.runInteraction( "get_users_in_room", self.get_users_in_room_txn, room_id @@ -244,7 +245,7 @@ def _get_subset_users_in_room_with_profiles( txn: LoggingTransaction, ) -> Dict[str, ProfileInfo]: clause, ids = make_in_list_sql_clause( - self.database_engine, "m.user_id", user_ids + self.database_engine, "c.state_key", user_ids ) sql = """ @@ -562,7 +563,7 @@ async def get_local_current_membership_for_user_in_room( return results_dict.get("membership"), 
results_dict.get("event_id") - @cached(max_entries=500000, iterable=True, prune_unread_entries=False) + @cached(max_entries=500000, iterable=True) async def get_rooms_for_user_with_stream_ordering( self, user_id: str ) -> FrozenSet[GetRoomsForUserWithStreamOrdering]: @@ -733,25 +734,76 @@ async def get_rooms_for_user( ) return frozenset(r.room_id for r in rooms) - @cached( - max_entries=500000, - cache_context=True, - iterable=True, - prune_unread_entries=False, + @cached(max_entries=10000) + async def does_pair_of_users_share_a_room( + self, user_id: str, other_user_id: str + ) -> bool: + raise NotImplementedError() + + @cachedList( + cached_method_name="does_pair_of_users_share_a_room", list_name="other_user_ids" ) - async def get_users_who_share_room_with_user( - self, user_id: str, cache_context: _CacheContext + async def _do_users_share_a_room( + self, user_id: str, other_user_ids: Collection[str] + ) -> Mapping[str, Optional[bool]]: + """Return mapping from user ID to whether they share a room with the + given user. + + Note: `None` and `False` are equivalent and mean they don't share a + room. + """ + + def do_users_share_a_room_txn( + txn: LoggingTransaction, user_ids: Collection[str] + ) -> Dict[str, bool]: + clause, args = make_in_list_sql_clause( + self.database_engine, "state_key", user_ids + ) + + # This query works by fetching both the list of rooms for the target + # user and the set of other users, and then checking if there is any + # overlap. + sql = f""" + SELECT b.state_key + FROM ( + SELECT room_id FROM current_state_events + WHERE type = 'm.room.member' AND membership = 'join' AND state_key = ? + ) AS a + INNER JOIN ( + SELECT room_id, state_key FROM current_state_events + WHERE type = 'm.room.member' AND membership = 'join' AND {clause} + ) AS b using (room_id) + LIMIT 1 + """ + + txn.execute(sql, (user_id, *args)) + return {u: True for u, in txn} + + to_return = {} + for batch_user_ids in batch_iter(other_user_ids, 1000): + res = await self.db_pool.runInteraction( + "do_users_share_a_room", do_users_share_a_room_txn, batch_user_ids + ) + to_return.update(res) + + return to_return + + async def do_users_share_a_room( + self, user_id: str, other_user_ids: Collection[str] ) -> Set[str]: + """Return the set of users who share a room with the first users""" + + user_dict = await self._do_users_share_a_room(user_id, other_user_ids) + + return {u for u, share_room in user_dict.items() if share_room} + + async def get_users_who_share_room_with_user(self, user_id: str) -> Set[str]: """Returns the set of users who share a room with `user_id`""" - room_ids = await self.get_rooms_for_user( - user_id, on_invalidate=cache_context.invalidate - ) + room_ids = await self.get_rooms_for_user(user_id) user_who_share_room = set() for room_id in room_ids: - user_ids = await self.get_users_in_room( - room_id, on_invalidate=cache_context.invalidate - ) + user_ids = await self.get_users_in_room(room_id) user_who_share_room.update(user_ids) return user_who_share_room @@ -780,26 +832,8 @@ async def get_mutual_rooms_between_users( return shared_room_ids or frozenset() - async def get_joined_users_from_context( - self, event: EventBase, context: EventContext - ) -> Dict[str, ProfileInfo]: - state_group: Union[object, int] = context.state_group - if not state_group: - # If state_group is None it means it has yet to be assigned a - # state group, i.e. we need to make sure that calls with a state_group - # of None don't hit previous cached calls with a None state_group. 
- # To do this we set the state_group to a new object as object() != object() - state_group = object() - - current_state_ids = await context.get_current_state_ids() - assert current_state_ids is not None - assert state_group is not None - return await self._get_joined_users_from_context( - event.room_id, state_group, current_state_ids, event=event, context=context - ) - async def get_joined_users_from_state( - self, room_id: str, state_entry: "_StateCacheEntry" + self, room_id: str, state: StateMap[str], state_entry: "_StateCacheEntry" ) -> Dict[str, ProfileInfo]: state_group: Union[object, int] = state_entry.state_group if not state_group: @@ -812,18 +846,17 @@ async def get_joined_users_from_state( assert state_group is not None with Measure(self._clock, "get_joined_users_from_state"): return await self._get_joined_users_from_context( - room_id, state_group, state_entry.state, context=state_entry + room_id, state_group, state, context=state_entry ) - @cached(num_args=2, cache_context=True, iterable=True, max_entries=100000) + @cached(num_args=2, iterable=True, max_entries=100000) async def _get_joined_users_from_context( self, room_id: str, state_group: Union[object, int], current_state_ids: StateMap[str], - cache_context: _CacheContext, event: Optional[EventBase] = None, - context: Optional[Union[EventContext, "_StateCacheEntry"]] = None, + context: Optional["_StateCacheEntry"] = None, ) -> Dict[str, ProfileInfo]: # We don't use `state_group`, it's there so that we can cache based # on it. However, it's important that it's never None, since two current_states @@ -863,7 +896,9 @@ async def _get_joined_users_from_context( # We don't update the event cache hit ratio as it completely throws off # the hit ratio counts. After all, we don't populate the cache if we # miss it here - event_map = self._get_events_from_cache(member_event_ids, update_metrics=False) + event_map = await self._get_events_from_cache( + member_event_ids, update_metrics=False + ) missing_member_event_ids = [] for event_id in member_event_ids: @@ -922,7 +957,7 @@ async def _get_joined_profiles_from_event_ids( iterable=event_ids, retcols=("user_id", "display_name", "avatar_url", "event_id"), keyvalues={"membership": Membership.JOIN}, - batch_size=500, + batch_size=1000, desc="_get_joined_profiles_from_event_ids", ) @@ -1017,7 +1052,7 @@ def get_current_hosts_in_room_txn(txn: LoggingTransaction) -> Set[str]: ) async def get_joined_hosts( - self, room_id: str, state_entry: "_StateCacheEntry" + self, room_id: str, state: StateMap[str], state_entry: "_StateCacheEntry" ) -> FrozenSet[str]: state_group: Union[object, int] = state_entry.state_group if not state_group: @@ -1030,7 +1065,7 @@ async def get_joined_hosts( assert state_group is not None with Measure(self._clock, "get_joined_hosts"): return await self._get_joined_hosts( - room_id, state_group, state_entry=state_entry + room_id, state_group, state, state_entry=state_entry ) @cached(num_args=2, max_entries=10000, iterable=True) @@ -1038,6 +1073,7 @@ async def _get_joined_hosts( self, room_id: str, state_group: Union[object, int], + state: StateMap[str], state_entry: "_StateCacheEntry", ) -> FrozenSet[str]: # We don't use `state_group`, it's there so that we can cache based on @@ -1093,7 +1129,7 @@ async def _get_joined_hosts( # The cache doesn't match the state group or prev state group, # so we calculate the result from first principles. 
joined_users = await self.get_joined_users_from_state( - room_id, state_entry + room_id, state, state_entry ) cache.hosts_to_joined_users = {} diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 3a1df7776c90..2590b52f7352 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -1022,8 +1022,8 @@ def _get_events_around_txn( } async def get_all_new_events_stream( - self, from_id: int, current_id: int, limit: int - ) -> Tuple[int, List[EventBase]]: + self, from_id: int, current_id: int, limit: int, get_prev_content: bool = False + ) -> Tuple[int, List[EventBase], Dict[str, Optional[int]]]: """Get all new events Returns all events with from_id < stream_ordering <= current_id. @@ -1032,19 +1032,21 @@ async def get_all_new_events_stream( from_id: the stream_ordering of the last event we processed current_id: the stream_ordering of the most recently processed event limit: the maximum number of events to return + get_prev_content: whether to fetch previous event content Returns: - A tuple of (next_id, events), where `next_id` is the next value to - pass as `from_id` (it will either be the stream_ordering of the - last returned event, or, if fewer than `limit` events were found, - the `current_id`). + A tuple of (next_id, events, event_to_received_ts), where `next_id` + is the next value to pass as `from_id` (it will either be the + stream_ordering of the last returned event, or, if fewer than `limit` + events were found, the `current_id`). The `event_to_received_ts` is + a dictionary mapping event ID to the event `received_ts`. """ def get_all_new_events_stream_txn( txn: LoggingTransaction, - ) -> Tuple[int, List[str]]: + ) -> Tuple[int, Dict[str, Optional[int]]]: sql = ( - "SELECT e.stream_ordering, e.event_id" + "SELECT e.stream_ordering, e.event_id, e.received_ts" " FROM events AS e" " WHERE" " ? < e.stream_ordering AND e.stream_ordering <= ?" 
@@ -1059,15 +1061,21 @@ def get_all_new_events_stream_txn( if len(rows) == limit: upper_bound = rows[-1][0] - return upper_bound, [row[1] for row in rows] + event_to_received_ts: Dict[str, Optional[int]] = { + row[1]: row[2] for row in rows + } + return upper_bound, event_to_received_ts - upper_bound, event_ids = await self.db_pool.runInteraction( + upper_bound, event_to_received_ts = await self.db_pool.runInteraction( "get_all_new_events_stream", get_all_new_events_stream_txn ) - events = await self.get_events_as_list(event_ids) + events = await self.get_events_as_list( + event_to_received_ts.keys(), + get_prev_content=get_prev_content, + ) - return upper_bound, events + return upper_bound, events, event_to_received_ts async def get_federation_out_pos(self, typ: str) -> int: if self._need_to_reset_federation_stream_positions: diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index fa9eadaca7ea..a7fcc564a992 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -24,6 +24,7 @@ from synapse.storage.engines import PostgresEngine from synapse.storage.state import StateFilter from synapse.types import MutableStateMap, StateMap +from synapse.util.caches import intern_string if TYPE_CHECKING: from synapse.server import HomeServer @@ -136,7 +137,7 @@ def _get_state_groups_from_groups_txn( txn.execute(sql % (where_clause,), args) for row in txn: typ, state_key, event_id = row - key = (typ, state_key) + key = (intern_string(typ), intern_string(state_key)) results[group][key] = event_id else: max_entries_returned = state_filter.max_entries_returned() diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index 609a2b88bfbf..bb64543c1f2d 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -202,7 +202,14 @@ def _get_state_for_group_using_cache( requests state from the cache, if False we need to query the DB for the missing state. """ - cache_entry = cache.get(group) + # If we are asked explicitly for a subset of keys, we only ask for those + # from the cache. This ensures that the `DictionaryCache` can make + # better decisions about what to cache and what to expire. + dict_keys = None + if not state_filter.has_wildcards(): + dict_keys = state_filter.concrete_types() + + cache_entry = cache.get(group, dict_keys=dict_keys) state_dict_ids = cache_entry.value if cache_entry.full or state_filter.is_full(): @@ -400,14 +407,17 @@ async def store_state_group( room_id: str, prev_group: Optional[int], delta_ids: Optional[StateMap[str]], - current_state_ids: StateMap[str], + current_state_ids: Optional[StateMap[str]], ) -> int: """Store a new set of state, returning a newly assigned state group. + At least one of `current_state_ids` and `prev_group` must be provided. Whenever + `prev_group` is not None, `delta_ids` must also not be None. + Args: event_id: The event ID for which the state was calculated room_id - prev_group: A previous state group for the room, optional. + prev_group: A previous state group for the room. delta_ids: The delta between state at `prev_group` and `current_state_ids`, if `prev_group` was given. Same format as `current_state_ids`. 
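The updated `store_state_group` docstring above tightens the contract: at least one of `current_state_ids` and `prev_group` must be supplied, and a non-None `prev_group` must be accompanied by `delta_ids`. The following caller-side sketch shows both call shapes; `state_store`, the room/event identifiers, and the state maps are illustrative placeholders rather than values from the codebase, and only the parameter names come from the diff.

from typing import Dict, Optional, Tuple

StateMap = Dict[Tuple[str, str], str]  # (event type, state_key) -> event_id


async def persist_state_examples(state_store) -> None:
    """Two illustrative calls matching the documented contract."""
    room_id = "!room:example.org"

    # 1. No previous group known: persist the full state map.
    full_group: int = await state_store.store_state_group(
        event_id="$create",
        room_id=room_id,
        prev_group=None,
        delta_ids=None,
        current_state_ids={("m.room.create", ""): "$create"},
    )

    # 2. A later group expressed as a delta: prev_group requires delta_ids,
    #    and current_state_ids may then be omitted (None).
    await state_store.store_state_group(
        event_id="$join",
        room_id=room_id,
        prev_group=full_group,
        delta_ids={("m.room.member", "@alice:example.org"): "$join"},
        current_state_ids=None,
    )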
@@ -418,10 +428,41 @@ async def store_state_group( The state group ID """ - def _store_state_group_txn(txn: LoggingTransaction) -> int: - if current_state_ids is None: - # AFAIK, this can never happen - raise Exception("current_state_ids cannot be None") + if prev_group is None and current_state_ids is None: + raise Exception("current_state_ids and prev_group can't both be None") + + if prev_group is not None and delta_ids is None: + raise Exception("delta_ids is None when prev_group is not None") + + def insert_delta_group_txn( + txn: LoggingTransaction, prev_group: int, delta_ids: StateMap[str] + ) -> Optional[int]: + """Try and persist the new group as a delta. + + Requires that we have the state as a delta from a previous state group. + + Returns: + The state group if successfully created, or None if the state + needs to be persisted as a full state. + """ + is_in_db = self.db_pool.simple_select_one_onecol_txn( + txn, + table="state_groups", + keyvalues={"id": prev_group}, + retcol="id", + allow_none=True, + ) + if not is_in_db: + raise Exception( + "Trying to persist state with unpersisted prev_group: %r" + % (prev_group,) + ) + + # if the chain of state group deltas is going too long, we fall back to + # persisting a complete state group. + potential_hops = self._count_state_group_hops_txn(txn, prev_group) + if potential_hops >= MAX_STATE_DELTA_HOPS: + return None state_group = self._state_group_seq_gen.get_next_id_txn(txn) @@ -431,51 +472,45 @@ def _store_state_group_txn(txn: LoggingTransaction) -> int: values={"id": state_group, "room_id": room_id, "event_id": event_id}, ) - # We persist as a delta if we can, while also ensuring the chain - # of deltas isn't tooo long, as otherwise read performance degrades. - if prev_group: - is_in_db = self.db_pool.simple_select_one_onecol_txn( - txn, - table="state_groups", - keyvalues={"id": prev_group}, - retcol="id", - allow_none=True, - ) - if not is_in_db: - raise Exception( - "Trying to persist state with unpersisted prev_group: %r" - % (prev_group,) - ) - - potential_hops = self._count_state_group_hops_txn(txn, prev_group) - if prev_group and potential_hops < MAX_STATE_DELTA_HOPS: - assert delta_ids is not None - - self.db_pool.simple_insert_txn( - txn, - table="state_group_edges", - values={"state_group": state_group, "prev_state_group": prev_group}, - ) + self.db_pool.simple_insert_txn( + txn, + table="state_group_edges", + values={"state_group": state_group, "prev_state_group": prev_group}, + ) - self.db_pool.simple_insert_many_txn( - txn, - table="state_groups_state", - keys=("state_group", "room_id", "type", "state_key", "event_id"), - values=[ - (state_group, room_id, key[0], key[1], state_id) - for key, state_id in delta_ids.items() - ], - ) - else: - self.db_pool.simple_insert_many_txn( - txn, - table="state_groups_state", - keys=("state_group", "room_id", "type", "state_key", "event_id"), - values=[ - (state_group, room_id, key[0], key[1], state_id) - for key, state_id in current_state_ids.items() - ], - ) + self.db_pool.simple_insert_many_txn( + txn, + table="state_groups_state", + keys=("state_group", "room_id", "type", "state_key", "event_id"), + values=[ + (state_group, room_id, key[0], key[1], state_id) + for key, state_id in delta_ids.items() + ], + ) + + return state_group + + def insert_full_state_txn( + txn: LoggingTransaction, current_state_ids: StateMap[str] + ) -> int: + """Persist the full state, returning the new state group.""" + state_group = self._state_group_seq_gen.get_next_id_txn(txn) + + 
self.db_pool.simple_insert_txn( + txn, + table="state_groups", + values={"id": state_group, "room_id": room_id, "event_id": event_id}, + ) + + self.db_pool.simple_insert_many_txn( + txn, + table="state_groups_state", + keys=("state_group", "room_id", "type", "state_key", "event_id"), + values=[ + (state_group, room_id, key[0], key[1], state_id) + for key, state_id in current_state_ids.items() + ], + ) # Prefill the state group caches with this group. # It's fine to use the sequence like this as the state group map @@ -491,7 +526,7 @@ def _store_state_group_txn(txn: LoggingTransaction) -> int: self._state_group_members_cache.update, self._state_group_members_cache.sequence, key=state_group, - value=dict(current_member_state_ids), + value=current_member_state_ids, ) current_non_member_state_ids = { @@ -503,13 +538,35 @@ def _store_state_group_txn(txn: LoggingTransaction) -> int: self._state_group_cache.update, self._state_group_cache.sequence, key=state_group, - value=dict(current_non_member_state_ids), + value=current_non_member_state_ids, ) return state_group + if prev_group is not None: + state_group = await self.db_pool.runInteraction( + "store_state_group.insert_delta_group", + insert_delta_group_txn, + prev_group, + delta_ids, + ) + if state_group is not None: + return state_group + + # We're going to persist the state as a complete group rather than + # a delta, so first we need to ensure we have loaded the state map + # from the database. + if current_state_ids is None: + assert prev_group is not None + assert delta_ids is not None + groups = await self._get_state_for_groups([prev_group]) + current_state_ids = dict(groups[prev_group]) + current_state_ids.update(delta_ids) + return await self.db_pool.runInteraction( - "store_state_group", _store_state_group_txn + "store_state_group.insert_full_state", + insert_full_state_txn, + current_state_ids, ) async def purge_unreferenced_state_groups( diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index dc237e3032f8..a9a88c8bfd8b 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -74,13 +74,14 @@ Changes in SCHEMA_VERSION = 72: - event_edges.(room_id, is_state) are no longer written to. + - Tables related to groups are dropped. """ SCHEMA_COMPAT_VERSION = ( - # We no longer maintain `event_edges.room_id`, so synapses with SCHEMA_VERSION < 71 - # will break. - 71 + # The groups tables are no longer accessible, so synapses with SCHEMA_VERSION < 72 + # could break. + 72 ) """Limit on how far the synapse codebase can be rolled back without breaking db compat diff --git a/synapse/storage/schema/main/delta/72/03bg_populate_events_columns.py b/synapse/storage/schema/main/delta/72/03bg_populate_events_columns.py new file mode 100644 index 000000000000..55a5d092cc67 --- /dev/null +++ b/synapse/storage/schema/main/delta/72/03bg_populate_events_columns.py @@ -0,0 +1,47 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json + +from synapse.storage.types import Cursor + + +def run_create(cur: Cursor, database_engine, *args, **kwargs): + """Add a bg update to populate the `state_key` and `rejection_reason` columns of `events`""" + + # we know that any new events will have the columns populated (and that has been + # the case since schema_version 68, so there is no chance of rolling back now). + # + # So, we only need to make sure that existing rows are updated. We read the + # current min and max stream orderings, since that is guaranteed to include all + # the events that were stored before the new columns were added. + cur.execute("SELECT MIN(stream_ordering), MAX(stream_ordering) FROM events") + (min_stream_ordering, max_stream_ordering) = cur.fetchone() + + if min_stream_ordering is None: + # no rows, nothing to do. + return + + cur.execute( + "INSERT into background_updates (ordering, update_name, progress_json)" + " VALUES (7203, 'events_populate_state_key_rejections', ?)", + ( + json.dumps( + { + "min_stream_ordering_exclusive": min_stream_ordering - 1, + "max_stream_ordering_inclusive": max_stream_ordering, + } + ), + ), + ) diff --git a/synapse/storage/schema/main/delta/72/03drop_event_reference_hashes.sql b/synapse/storage/schema/main/delta/72/03drop_event_reference_hashes.sql new file mode 100644 index 000000000000..0da668aa3ae9 --- /dev/null +++ b/synapse/storage/schema/main/delta/72/03drop_event_reference_hashes.sql @@ -0,0 +1,17 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- event_reference_hashes is unused, so we can drop it +DROP TABLE event_reference_hashes; diff --git a/synapse/storage/schema/main/delta/72/03remove_groups.sql b/synapse/storage/schema/main/delta/72/03remove_groups.sql new file mode 100644 index 000000000000..b7c5894de877 --- /dev/null +++ b/synapse/storage/schema/main/delta/72/03remove_groups.sql @@ -0,0 +1,31 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Remove the tables which powered the unspecced groups/communities feature. 
+DROP TABLE IF EXISTS group_attestations_remote; +DROP TABLE IF EXISTS group_attestations_renewals; +DROP TABLE IF EXISTS group_invites; +DROP TABLE IF EXISTS group_roles; +DROP TABLE IF EXISTS group_room_categories; +DROP TABLE IF EXISTS group_rooms; +DROP TABLE IF EXISTS group_summary_roles; +DROP TABLE IF EXISTS group_summary_room_categories; +DROP TABLE IF EXISTS group_summary_rooms; +DROP TABLE IF EXISTS group_summary_users; +DROP TABLE IF EXISTS group_users; +DROP TABLE IF EXISTS groups; +DROP TABLE IF EXISTS local_group_membership; +DROP TABLE IF EXISTS local_group_updates; +DROP TABLE IF EXISTS remote_profile_cache; diff --git a/synapse/storage/util/partial_state_events_tracker.py b/synapse/storage/util/partial_state_events_tracker.py index 211437cfaa99..466e5137f2d3 100644 --- a/synapse/storage/util/partial_state_events_tracker.py +++ b/synapse/storage/util/partial_state_events_tracker.py @@ -166,6 +166,7 @@ async def await_full_state(self, room_id: str) -> None: logger.info( "Awaiting un-partial-stating of room %s", room_id, + stack_info=True, ) await make_deferred_yieldable(d) diff --git a/synapse/util/caches/dictionary_cache.py b/synapse/util/caches/dictionary_cache.py index d267703df0f2..fa91479c97f6 100644 --- a/synapse/util/caches/dictionary_cache.py +++ b/synapse/util/caches/dictionary_cache.py @@ -14,11 +14,13 @@ import enum import logging import threading -from typing import Any, Dict, Generic, Iterable, Optional, Set, TypeVar +from typing import Any, Dict, Generic, Iterable, Optional, Set, Tuple, TypeVar, Union import attr +from typing_extensions import Literal from synapse.util.caches.lrucache import LruCache +from synapse.util.caches.treecache import TreeCache logger = logging.getLogger(__name__) @@ -33,10 +35,12 @@ # This class can't be generic because it uses slots with attrs. # See: https://github.com/python-attrs/attrs/issues/313 -@attr.s(slots=True, auto_attribs=True) +@attr.s(slots=True, frozen=True, auto_attribs=True) class DictionaryEntry: # should be: Generic[DKT, DV]. """Returned when getting an entry from the cache + If `full` is true then `known_absent` will be the empty set. + Attributes: full: Whether the cache has the full or dict or just some keys. If not full then not all requested keys will necessarily be present @@ -53,20 +57,90 @@ def __len__(self) -> int: return len(self.value) +class _FullCacheKey(enum.Enum): + """The key we use to cache the full dict.""" + + KEY = object() + + class _Sentinel(enum.Enum): # defining a sentinel in this way allows mypy to correctly handle the # type of a dictionary lookup. sentinel = object() +class _PerKeyValue(Generic[DV]): + """The cached value of a dictionary key. If `value` is the sentinel, + indicates that the requested key is known to *not* be in the full dict. + """ + + __slots__ = ["value"] + + def __init__(self, value: Union[DV, Literal[_Sentinel.sentinel]]) -> None: + self.value = value + + def __len__(self) -> int: + # We add a `__len__` implementation as we use this class in a cache + # where the values are variable length. + return 1 + + class DictionaryCache(Generic[KT, DKT, DV]): """Caches key -> dictionary lookups, supporting caching partial dicts, i.e. fetching a subset of dictionary keys for a particular key. + + This cache has two levels of key. First there is the "cache key" (of type + `KT`), which maps to a dict. The keys to that dict are the "dict key" (of + type `DKT`). The overall structure is therefore `KT->DKT->DV`. 
For + example, it might look like: + + { + 1: { 1: "a", 2: "b" }, + 2: { 1: "c" }, + } + + It is possible to look up either individual dict keys, or the *complete* + dict for a given cache key. + + Each dict item, and the complete dict is treated as a separate LRU + entry for the purpose of cache expiry. For example, given: + dict_cache.get(1, None) -> DictionaryEntry({1: "a", 2: "b"}) + dict_cache.get(1, [1]) -> DictionaryEntry({1: "a"}) + dict_cache.get(1, [2]) -> DictionaryEntry({2: "b"}) + + ... then the cache entry for the complete dict will expire first, + followed by the cache entry for the '1' dict key, and finally that + for the '2' dict key. """ def __init__(self, name: str, max_entries: int = 1000): - self.cache: LruCache[KT, DictionaryEntry] = LruCache( - max_size=max_entries, cache_name=name, size_callback=len + # We use a single LruCache to store two different types of entries: + # 1. Map from (key, dict_key) -> dict value (or sentinel, indicating + # the key doesn't exist in the dict); and + # 2. Map from (key, _FullCacheKey.KEY) -> full dict. + # + # The former is used when explicit keys of the dictionary are looked up, + # and the latter when the full dictionary is requested. + # + # If when explicit keys are requested and not in the cache, we then look + # to see if we have the full dict and use that if we do. If found in the + # full dict each key is added into the cache. + # + # This set up allows the `LruCache` to prune the full dict entries if + # they haven't been used in a while, even when there have been recent + # queries for subsets of the dict. + # + # Typing: + # * A key of `(KT, DKT)` has a value of `_PerKeyValue` + # * A key of `(KT, _FullCacheKey.KEY)` has a value of `Dict[DKT, DV]` + self.cache: LruCache[ + Tuple[KT, Union[DKT, Literal[_FullCacheKey.KEY]]], + Union[_PerKeyValue, Dict[DKT, DV]], + ] = LruCache( + max_size=max_entries, + cache_name=name, + cache_type=TreeCache, + size_callback=len, ) self.name = name @@ -91,23 +165,83 @@ def get( Args: key dict_keys: If given a set of keys then return only those keys - that exist in the cache. + that exist in the cache. If None then returns the full dict + if it is in the cache. Returns: - DictionaryEntry + DictionaryEntry: If `dict_keys` is not None then `DictionaryEntry` + will contain include the keys that are in the cache. If None then + will either return the full dict if in the cache, or the empty + dict (with `full` set to False) if it isn't. """ - entry = self.cache.get(key, _Sentinel.sentinel) - if entry is not _Sentinel.sentinel: - if dict_keys is None: - return DictionaryEntry( - entry.full, entry.known_absent, dict(entry.value) - ) + if dict_keys is None: + # The caller wants the full set of dictionary keys for this cache key + return self._get_full_dict(key) + + # We are being asked for a subset of keys. + + # First go and check for each requested dict key in the cache, tracking + # which we couldn't find. + values = {} + known_absent = set() + missing = [] + for dict_key in dict_keys: + entry = self.cache.get((key, dict_key), _Sentinel.sentinel) + if entry is _Sentinel.sentinel: + missing.append(dict_key) + continue + + assert isinstance(entry, _PerKeyValue) + + if entry.value is _Sentinel.sentinel: + known_absent.add(dict_key) else: - return DictionaryEntry( - entry.full, - entry.known_absent, - {k: entry.value[k] for k in dict_keys if k in entry.value}, - ) + values[dict_key] = entry.value + + # If we found everything we can return immediately. 
+ if not missing: + return DictionaryEntry(False, known_absent, values) + + # We are missing some keys, so check if we happen to have the full dict in + # the cache. + # + # We don't update the last access time for this cache fetch, as we + # aren't explicitly interested in the full dict and so we don't want + # requests for explicit dict keys to keep the full dict in the cache. + entry = self.cache.get( + (key, _FullCacheKey.KEY), + _Sentinel.sentinel, + update_last_access=False, + ) + if entry is _Sentinel.sentinel: + # Not in the cache, return the subset of keys we found. + return DictionaryEntry(False, known_absent, values) + + # We have the full dict! + assert isinstance(entry, dict) + + for dict_key in missing: + # We explicitly add each dict key to the cache, so that cache hit + # rates and LRU times for each key can be tracked separately. + value = entry.get(dict_key, _Sentinel.sentinel) # type: ignore[arg-type] + self.cache[(key, dict_key)] = _PerKeyValue(value) + + if value is not _Sentinel.sentinel: + values[dict_key] = value + + return DictionaryEntry(True, set(), values) + + def _get_full_dict( + self, + key: KT, + ) -> DictionaryEntry: + """Fetch the full dict for the given key.""" + + # First we check if we have cached the full dict. + entry = self.cache.get((key, _FullCacheKey.KEY), _Sentinel.sentinel) + if entry is not _Sentinel.sentinel: + assert isinstance(entry, dict) + return DictionaryEntry(True, set(), entry) return DictionaryEntry(False, set(), {}) @@ -117,7 +251,13 @@ def invalidate(self, key: KT) -> None: # Increment the sequence number so that any SELECT statements that # raced with the INSERT don't update the cache (SYN-369) self.sequence += 1 - self.cache.pop(key, None) + + # We want to drop all information about the dict for the given key, so + # we use `del_multi` to delete it all in one go. + # + # We ignore the type error here: `del_multi` accepts a truncated key + # (when the key type is a tuple). + self.cache.del_multi((key,)) # type: ignore[arg-type] def invalidate_all(self) -> None: self.check_thread() @@ -131,7 +271,16 @@ def update( value: Dict[DKT, DV], fetched_keys: Optional[Iterable[DKT]] = None, ) -> None: - """Updates the entry in the cache + """Updates the entry in the cache. + + Note: This does *not* invalidate any existing entries for the `key`. + In particular, if we add an entry for the cached "full dict" with + `fetched_keys=None`, existing entries for individual dict keys are + not invalidated. Likewise, adding entries for individual keys does + not invalidate any cached value for the full dict. + + In other words: if the underlying data is *changed*, the cache must + be explicitly invalidated via `.invalidate()`. Args: sequence @@ -149,20 +298,27 @@ def update( # Only update the cache if the caches sequence number matches the # number that the cache had before the SELECT was started (SYN-369) if fetched_keys is None: - self._insert(key, value, set()) + self.cache[(key, _FullCacheKey.KEY)] = value else: - self._update_or_insert(key, value, fetched_keys) + self._update_subset(key, value, fetched_keys) - def _update_or_insert( - self, key: KT, value: Dict[DKT, DV], known_absent: Iterable[DKT] + def _update_subset( + self, key: KT, value: Dict[DKT, DV], fetched_keys: Iterable[DKT] ) -> None: - # We pop and reinsert as we need to tell the cache the size may have - # changed + """Add the given dictionary values as explicit keys in the cache. 
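To make the new two-level layout concrete, here is a small usage sketch. It is not part of the patch; it assumes a Synapse checkout with this change applied, and the cache name, keys and values are invented for illustration.

    from synapse.util.caches.dictionary_cache import DictionaryCache

    # KT=str (cache key), DKT=int (dict key), DV=str (dict value).
    cache: DictionaryCache[str, int, str] = DictionaryCache("example", max_entries=100)

    # Cache the complete dict for "room1". With fetched_keys=None this is stored
    # under the ("room1", _FullCacheKey.KEY) entry.
    cache.update(cache.sequence, "room1", {1: "a", 2: "b"})

    # Asking for a subset misses the per-key entries, falls back to the cached full
    # dict (without refreshing its LRU position), and back-fills ("room1", 1) and
    # ("room1", 3) as per-key entries, the latter marked "known absent".
    entry = cache.get("room1", dict_keys=[1, 3])
    assert entry.full and entry.value == {1: "a"}

    # A repeat lookup for key 3 is now served from its own per-key entry.
    entry = cache.get("room1", dict_keys=[3])
    assert not entry.full and entry.known_absent == {3} and entry.value == {}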
+ + Args: + key: top-level cache key + value: The dictionary with all the values that we should cache + fetched_keys: The full set of dict keys that were looked up. Any keys + here not in `value` should be marked as "known absent". + """ + + for dict_key, dict_value in value.items(): + self.cache[(key, dict_key)] = _PerKeyValue(dict_value) - entry: DictionaryEntry = self.cache.pop(key, DictionaryEntry(False, set(), {})) - entry.value.update(value) - entry.known_absent.update(known_absent) - self.cache[key] = entry + for dict_key in fetched_keys: + if dict_key in value: + continue - def _insert(self, key: KT, value: Dict[DKT, DV], known_absent: Set[DKT]) -> None: - self.cache[key] = DictionaryEntry(True, known_absent, value) + self.cache[(key, dict_key)] = _PerKeyValue(_Sentinel.sentinel) diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 8ed5325c5d07..b3bdedb04cad 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -25,8 +25,10 @@ Collection, Dict, Generic, + Iterable, List, Optional, + Tuple, Type, TypeVar, Union, @@ -44,7 +46,11 @@ from synapse.metrics.jemalloc import get_jemalloc_stats from synapse.util import Clock, caches from synapse.util.caches import CacheMetric, EvictionReason, register_cache -from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry +from synapse.util.caches.treecache import ( + TreeCache, + iterate_tree_cache_entry, + iterate_tree_cache_items, +) from synapse.util.linked_list import ListNode if TYPE_CHECKING: @@ -537,6 +543,7 @@ def cache_get( default: Literal[None] = None, callbacks: Collection[Callable[[], None]] = ..., update_metrics: bool = ..., + update_last_access: bool = ..., ) -> Optional[VT]: ... @@ -546,6 +553,7 @@ def cache_get( default: T, callbacks: Collection[Callable[[], None]] = ..., update_metrics: bool = ..., + update_last_access: bool = ..., ) -> Union[T, VT]: ... @@ -555,10 +563,27 @@ def cache_get( default: Optional[T] = None, callbacks: Collection[Callable[[], None]] = (), update_metrics: bool = True, + update_last_access: bool = True, ) -> Union[None, T, VT]: + """Look up a key in the cache + + Args: + key + default + callbacks: A collection of callbacks that will fire when the + node is removed from the cache (either due to invalidation + or expiry). + update_metrics: Whether to update the hit rate metrics + update_last_access: Whether to update the last access metrics + on a node if successfully fetched. These metrics are used + to determine when to remove the node from the cache. Set + to False if this fetch should *not* prevent a node from + being expired. + """ node = cache.get(key, None) if node is not None: - move_node_to_front(node) + if update_last_access: + move_node_to_front(node) node.add_callbacks(callbacks) if update_metrics and metrics: metrics.inc_hits() @@ -568,6 +593,65 @@ def cache_get( metrics.inc_misses() return default + @overload + def cache_get_multi( + key: tuple, + default: Literal[None] = None, + update_metrics: bool = True, + ) -> Union[None, Iterable[Tuple[KT, VT]]]: + ... + + @overload + def cache_get_multi( + key: tuple, + default: T, + update_metrics: bool = True, + ) -> Union[T, Iterable[Tuple[KT, VT]]]: + ... + + @synchronized + def cache_get_multi( + key: tuple, + default: Optional[T] = None, + update_metrics: bool = True, + ) -> Union[None, T, Iterable[Tuple[KT, VT]]]: + """Returns a generator yielding all entries under the given key. + + Can only be used if backed by a tree cache. 
+ + Example: + + cache = LruCache(10, cache_type=TreeCache) + cache[(1, 1)] = "a" + cache[(1, 2)] = "b" + cache[(2, 1)] = "c" + + items = cache.get_multi((1,)) + assert list(items) == [((1, 1), "a"), ((1, 2), "b")] + + Returns: + Either default if the key doesn't exist, or a generator of the + key/value pairs. + """ + + assert isinstance(cache, TreeCache) + + node = cache.get(key, None) + if node is not None: + if update_metrics and metrics: + metrics.inc_hits() + + # We store entries in the `TreeCache` with values of type `_Node`, + # which we need to unwrap. + return ( + (full_key, lru_node.value) + for full_key, lru_node in iterate_tree_cache_items(key, node) + ) + else: + if update_metrics and metrics: + metrics.inc_misses() + return default + @synchronized def cache_set( key: KT, value: VT, callbacks: Collection[Callable[[], None]] = () @@ -674,6 +758,8 @@ def cache_contains(key: KT) -> bool: self.setdefault = cache_set_default self.pop = cache_pop self.del_multi = cache_del_multi + if cache_type is TreeCache: + self.get_multi = cache_get_multi # `invalidate` is exposed for consistency with DeferredCache, so that it can be # invalidated by the cache invalidation replication stream. self.invalidate = cache_del_multi @@ -730,3 +816,41 @@ def __del__(self) -> None: # This happens e.g. in the sync code where we have an expiring cache of # lru caches. self.clear() + + +class AsyncLruCache(Generic[KT, VT]): + """ + An asynchronous wrapper around a subset of the LruCache API. + + On its own this doesn't change the behaviour but allows subclasses that + utilize external cache systems that require await behaviour to be created. + """ + + def __init__(self, *args, **kwargs): # type: ignore + self._lru_cache: LruCache[KT, VT] = LruCache(*args, **kwargs) + + async def get( + self, key: KT, default: Optional[T] = None, update_metrics: bool = True + ) -> Optional[VT]: + return self._lru_cache.get(key, update_metrics=update_metrics) + + async def set(self, key: KT, value: VT) -> None: + self._lru_cache.set(key, value) + + async def invalidate(self, key: KT) -> None: + # This method should invalidate any external cache and then invalidate the LruCache. + return self._lru_cache.invalidate(key) + + def invalidate_local(self, key: KT) -> None: + """Remove an entry from the local cache + + This variant of `invalidate` is useful if we know that the external + cache has already been invalidated. + """ + return self._lru_cache.invalidate(key) + + async def contains(self, key: KT) -> bool: + return self._lru_cache.contains(key) + + async def clear(self) -> None: + self._lru_cache.clear() diff --git a/synapse/util/caches/treecache.py b/synapse/util/caches/treecache.py index e78305f78746..c1b8ec0c73eb 100644 --- a/synapse/util/caches/treecache.py +++ b/synapse/util/caches/treecache.py @@ -64,6 +64,15 @@ def set(self, key, value) -> None: self.size += 1 def get(self, key, default=None): + """When `key` is a full key, fetches the value for the given key (if + any). + + If `key` is only a partial key (i.e. a truncated tuple) then returns a + `TreeCacheNode`, which can be passed to the `iterate_tree_cache_*` + functions to iterate over all entries in the cache with keys that start + with the given partial key. + """ + node = self.root for k in key[:-1]: node = node.get(k, None) @@ -139,3 +148,32 @@ def iterate_tree_cache_entry(d): yield from iterate_tree_cache_entry(value_d) else: yield d + + +def iterate_tree_cache_items(key, value): + """Helper function to iterate over the leaves of a tree, i.e. 
a dict of that + can contain dicts. + + The provided key is a tuple that will get prepended to the returned keys. + + Example: + + cache = TreeCache() + cache[(1, 1)] = "a" + cache[(1, 2)] = "b" + cache[(2, 1)] = "c" + + tree_node = cache.get((1,)) + + items = iterate_tree_cache_items((1,), tree_node) + assert list(items) == [((1, 1), "a"), ((1, 2), "b")] + + Returns: + A generator yielding key/value pairs. + """ + if isinstance(value, TreeCacheNode): + for sub_key, sub_value in value.items(): + yield from iterate_tree_cache_items((*key, sub_key), sub_value) + else: + # we've reached a leaf of the tree. + yield key, value diff --git a/tests/api/test_ratelimiting.py b/tests/api/test_ratelimiting.py index 18649c2c05dc..c86f783c5bd4 100644 --- a/tests/api/test_ratelimiting.py +++ b/tests/api/test_ratelimiting.py @@ -314,3 +314,77 @@ def consume_at(time: float) -> bool: # Check that we get rate limited after using that token. self.assertFalse(consume_at(11.1)) + + def test_record_action_which_doesnt_fill_bucket(self) -> None: + limiter = Ratelimiter( + store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=3 + ) + + # Observe two actions, leaving room in the bucket for one more. + limiter.record_action(requester=None, key="a", n_actions=2, _time_now_s=0.0) + + # We should be able to take a new action now. + success, _ = self.get_success_or_raise( + limiter.can_do_action(requester=None, key="a", _time_now_s=0.0) + ) + self.assertTrue(success) + + # ... but not two. + success, _ = self.get_success_or_raise( + limiter.can_do_action(requester=None, key="a", _time_now_s=0.0) + ) + self.assertFalse(success) + + def test_record_action_which_fills_bucket(self) -> None: + limiter = Ratelimiter( + store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=3 + ) + + # Observe three actions, filling up the bucket. + limiter.record_action(requester=None, key="a", n_actions=3, _time_now_s=0.0) + + # We should be unable to take a new action now. + success, _ = self.get_success_or_raise( + limiter.can_do_action(requester=None, key="a", _time_now_s=0.0) + ) + self.assertFalse(success) + + # If we wait 10 seconds to leak a token, we should be able to take one action... + success, _ = self.get_success_or_raise( + limiter.can_do_action(requester=None, key="a", _time_now_s=10.0) + ) + self.assertTrue(success) + + # ... but not two. + success, _ = self.get_success_or_raise( + limiter.can_do_action(requester=None, key="a", _time_now_s=10.0) + ) + self.assertFalse(success) + + def test_record_action_which_overfills_bucket(self) -> None: + limiter = Ratelimiter( + store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=3 + ) + + # Observe four actions, exceeding the bucket. + limiter.record_action(requester=None, key="a", n_actions=4, _time_now_s=0.0) + + # We should be prevented from taking a new action now. + success, _ = self.get_success_or_raise( + limiter.can_do_action(requester=None, key="a", _time_now_s=0.0) + ) + self.assertFalse(success) + + # If we wait 10 seconds to leak a token, we should be unable to take an action + # because the bucket is still full. + success, _ = self.get_success_or_raise( + limiter.can_do_action(requester=None, key="a", _time_now_s=10.0) + ) + self.assertFalse(success) + + # But after another 10 seconds we leak a second token, giving us room for + # action. 
+ success, _ = self.get_success_or_raise( + limiter.can_do_action(requester=None, key="a", _time_now_s=20.0) + ) + self.assertTrue(success) diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py index 9f1115dd23b8..c6dd99316aa5 100644 --- a/tests/federation/test_complexity.py +++ b/tests/federation/test_complexity.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from http import HTTPStatus from unittest.mock import Mock from synapse.api.errors import Codes, SynapseError @@ -50,7 +51,7 @@ def test_complexity_simple(self): channel = self.make_signed_federation_request( "GET", "/_matrix/federation/unstable/rooms/%s/complexity" % (room_1,) ) - self.assertEqual(200, channel.code) + self.assertEqual(HTTPStatus.OK, channel.code) complexity = channel.json_body["v1"] self.assertTrue(complexity > 0, complexity) @@ -62,7 +63,7 @@ def test_complexity_simple(self): channel = self.make_signed_federation_request( "GET", "/_matrix/federation/unstable/rooms/%s/complexity" % (room_1,) ) - self.assertEqual(200, channel.code) + self.assertEqual(HTTPStatus.OK, channel.code) complexity = channel.json_body["v1"] self.assertEqual(complexity, 1.23) diff --git a/tests/federation/test_federation_client.py b/tests/federation/test_federation_client.py index 268a48d7ba5f..50e376f69574 100644 --- a/tests/federation/test_federation_client.py +++ b/tests/federation/test_federation_client.py @@ -22,6 +22,7 @@ from twisted.test.proto_helpers import MemoryReactor from synapse.api.room_versions import RoomVersions +from synapse.events import EventBase from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util import Clock @@ -38,42 +39,46 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer): self._mock_agent = mock.create_autospec(twisted.web.client.Agent, spec_set=True) homeserver.get_federation_http_client().agent = self._mock_agent - def test_get_room_state(self): - creator = f"@creator:{self.OTHER_SERVER_NAME}" - test_room_id = "!room_id" + # Move clock up to somewhat realistic time so the PDU destination retry + # works (`now` needs to be larger than `0 + PDU_RETRY_TIME_MS`). + self.reactor.advance(1000000000) + + self.creator = f"@creator:{self.OTHER_SERVER_NAME}" + self.test_room_id = "!room_id" + def test_get_room_state(self): # mock up some events to use in the response. # In real life, these would have things in `prev_events` and `auth_events`, but that's # a bit annoying to mock up, and the code under test doesn't care, so we don't bother. 
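The hunks that follow (here and in the other federation and handler tests) switch from calling add_hashes_and_signatures directly to a new add_hashes_and_signatures_from_other_server test helper. Its definition lives in tests/unittest.py and is not shown in this excerpt; judging from the call sites it replaces, it is presumably a FederatingHomeserverTestCase method roughly equivalent to the sketch below (treat the exact signature as an assumption).

    from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
    from synapse.config.server import DEFAULT_ROOM_VERSION
    from synapse.crypto.event_signing import add_hashes_and_signatures
    from synapse.types import JsonDict

    def add_hashes_and_signatures_from_other_server(
        self,
        event_dict: JsonDict,
        room_version: RoomVersion = KNOWN_ROOM_VERSIONS[DEFAULT_ROOM_VERSION],
    ) -> JsonDict:
        # Sign the event with the *other* (remote) server's key rather than this
        # homeserver's, as the old explicit calls did via signature_name/signing_key.
        add_hashes_and_signatures(
            room_version,
            event_dict,
            signature_name=self.OTHER_SERVER_NAME,
            signing_key=self.OTHER_SERVER_SIGNATURE_KEY,
        )
        return event_dict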
- create_event_dict = self.add_hashes_and_signatures( + create_event_dict = self.add_hashes_and_signatures_from_other_server( { - "room_id": test_room_id, + "room_id": self.test_room_id, "type": "m.room.create", "state_key": "", - "sender": creator, - "content": {"creator": creator}, + "sender": self.creator, + "content": {"creator": self.creator}, "prev_events": [], "auth_events": [], "origin_server_ts": 500, } ) - member_event_dict = self.add_hashes_and_signatures( + member_event_dict = self.add_hashes_and_signatures_from_other_server( { - "room_id": test_room_id, + "room_id": self.test_room_id, "type": "m.room.member", - "sender": creator, - "state_key": creator, + "sender": self.creator, + "state_key": self.creator, "content": {"membership": "join"}, "prev_events": [], "auth_events": [], "origin_server_ts": 600, } ) - pl_event_dict = self.add_hashes_and_signatures( + pl_event_dict = self.add_hashes_and_signatures_from_other_server( { - "room_id": test_room_id, + "room_id": self.test_room_id, "type": "m.room.power_levels", - "sender": creator, + "sender": self.creator, "state_key": "", "content": {}, "prev_events": [], @@ -102,8 +107,8 @@ def test_get_room_state(self): # now fire off the request state_resp, auth_resp = self.get_success( self.hs.get_federation_client().get_room_state( - "yet_another_server", - test_room_id, + "yet.another.server", + self.test_room_id, "event_id", RoomVersions.V9, ) @@ -112,7 +117,7 @@ def test_get_room_state(self): # check the right call got made to the agent self._mock_agent.request.assert_called_once_with( b"GET", - b"matrix://yet_another_server/_matrix/federation/v1/state/%21room_id?event_id=event_id", + b"matrix://yet.another.server/_matrix/federation/v1/state/%21room_id?event_id=event_id", headers=mock.ANY, bodyProducer=None, ) @@ -130,6 +135,102 @@ def test_get_room_state(self): ["m.room.create", "m.room.member", "m.room.power_levels"], ) + def test_get_pdu_returns_nothing_when_event_does_not_exist(self): + """No event should be returned when the event does not exist""" + remote_pdu = self.get_success( + self.hs.get_federation_client().get_pdu( + ["yet.another.server"], + "event_should_not_exist", + RoomVersions.V9, + ) + ) + self.assertEqual(remote_pdu, None) + + def test_get_pdu(self): + """Test to make sure an event is returned by `get_pdu()`""" + self._get_pdu_once() + + def test_get_pdu_event_from_cache_is_pristine(self): + """Test that modifications made to events returned by `get_pdu()` + do not propagate back to to the internal cache (events returned should + be a copy). + """ + + # Get the PDU in the cache + remote_pdu = self._get_pdu_once() + + # Modify the the event reference. + # This change should not make it back to the `_get_pdu_cache`. + remote_pdu.internal_metadata.outlier = True + + # Get the event again. This time it should read it from cache. + remote_pdu2 = self.get_success( + self.hs.get_federation_client().get_pdu( + ["yet.another.server"], + remote_pdu.event_id, + RoomVersions.V9, + ) + ) + + # Sanity check that we are working against the same event + self.assertEqual(remote_pdu.event_id, remote_pdu2.event_id) + + # Make sure the event does not include modification from earlier + self.assertIsNotNone(remote_pdu2) + self.assertEqual(remote_pdu2.internal_metadata.outlier, False) + + def _get_pdu_once(self) -> EventBase: + """Retrieve an event via `get_pdu()` and assert that an event was returned. + Also used to prime the cache for subsequent test logic. 
+ """ + message_event_dict = self.add_hashes_and_signatures_from_other_server( + { + "room_id": self.test_room_id, + "type": "m.room.message", + "sender": self.creator, + "state_key": "", + "content": {}, + "prev_events": [], + "auth_events": [], + "origin_server_ts": 700, + "depth": 10, + } + ) + + # mock up the response, and have the agent return it + self._mock_agent.request.side_effect = lambda *args, **kwargs: defer.succeed( + _mock_response( + { + "origin": "yet.another.server", + "origin_server_ts": 900, + "pdus": [ + message_event_dict, + ], + } + ) + ) + + remote_pdu = self.get_success( + self.hs.get_federation_client().get_pdu( + ["yet.another.server"], + "event_id", + RoomVersions.V9, + ) + ) + + # check the right call got made to the agent + self._mock_agent.request.assert_called_once_with( + b"GET", + b"matrix://yet.another.server/_matrix/federation/v1/event/event_id", + headers=mock.ANY, + bodyProducer=None, + ) + + self.assertIsNotNone(remote_pdu) + self.assertEqual(remote_pdu.internal_metadata.outlier, False) + + return remote_pdu + def _mock_response(resp: JsonDict): body = json.dumps(resp).encode("utf-8") diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index 413b3c9426f4..3a6ef221ae96 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +from http import HTTPStatus from parameterized import parameterized @@ -20,7 +21,6 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.config.server import DEFAULT_ROOM_VERSION -from synapse.crypto.event_signing import add_hashes_and_signatures from synapse.events import make_event_from_dict from synapse.federation.federation_server import server_matches_acl_event from synapse.rest import admin @@ -59,7 +59,7 @@ def test_bad_request(self, query_content): "/_matrix/federation/v1/get_missing_events/%s" % (room_1,), query_content, ) - self.assertEqual(400, channel.code, channel.result) + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, channel.result) self.assertEqual(channel.json_body["errcode"], "M_NOT_JSON") @@ -120,7 +120,7 @@ def test_needs_to_be_in_room(self): channel = self.make_signed_federation_request( "GET", "/_matrix/federation/v1/state/%s?event_id=xyz" % (room_1,) ) - self.assertEqual(403, channel.code, channel.result) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") @@ -148,13 +148,13 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer): tok2 = self.login("fozzie", "bear") self.helper.join(self._room_id, second_member_user_id, tok=tok2) - def _make_join(self, user_id) -> JsonDict: + def _make_join(self, user_id: str) -> JsonDict: channel = self.make_signed_federation_request( "GET", f"/_matrix/federation/v1/make_join/{self._room_id}/{user_id}" f"?ver={DEFAULT_ROOM_VERSION}", ) - self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) return channel.json_body def test_send_join(self): @@ -163,18 +163,16 @@ def test_send_join(self): join_result = self._make_join(joining_user) join_event_dict = join_result["event"] - add_hashes_and_signatures( - KNOWN_ROOM_VERSIONS[DEFAULT_ROOM_VERSION], + self.add_hashes_and_signatures_from_other_server( join_event_dict, - signature_name=self.OTHER_SERVER_NAME, - 
signing_key=self.OTHER_SERVER_SIGNATURE_KEY, + KNOWN_ROOM_VERSIONS[DEFAULT_ROOM_VERSION], ) channel = self.make_signed_federation_request( "PUT", f"/_matrix/federation/v2/send_join/{self._room_id}/x", content=join_event_dict, ) - self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) # we should get complete room state back returned_state = [ @@ -220,18 +218,16 @@ def test_send_join_partial_state(self): join_result = self._make_join(joining_user) join_event_dict = join_result["event"] - add_hashes_and_signatures( - KNOWN_ROOM_VERSIONS[DEFAULT_ROOM_VERSION], + self.add_hashes_and_signatures_from_other_server( join_event_dict, - signature_name=self.OTHER_SERVER_NAME, - signing_key=self.OTHER_SERVER_SIGNATURE_KEY, + KNOWN_ROOM_VERSIONS[DEFAULT_ROOM_VERSION], ) channel = self.make_signed_federation_request( "PUT", f"/_matrix/federation/v2/send_join/{self._room_id}/x?org.matrix.msc3706.partial_state=true", content=join_event_dict, ) - self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) # expect a reduced room state returned_state = [ @@ -264,6 +260,67 @@ def test_send_join_partial_state(self): ) self.assertEqual(r[("m.room.member", joining_user)].membership, "join") + @override_config({"rc_joins_per_room": {"per_second": 0, "burst_count": 3}}) + def test_make_join_respects_room_join_rate_limit(self) -> None: + # In the test setup, two users join the room. Since the rate limiter burst + # count is 3, a new make_join request to the room should be accepted. + + joining_user = "@ronniecorbett:" + self.OTHER_SERVER_NAME + self._make_join(joining_user) + + # Now have a new local user join the room. This saturates the rate limiter + # bucket, so the next make_join should be denied. + new_local_user = self.register_user("animal", "animal") + token = self.login("animal", "animal") + self.helper.join(self._room_id, new_local_user, tok=token) + + joining_user = "@ronniebarker:" + self.OTHER_SERVER_NAME + channel = self.make_signed_federation_request( + "GET", + f"/_matrix/federation/v1/make_join/{self._room_id}/{joining_user}" + f"?ver={DEFAULT_ROOM_VERSION}", + ) + self.assertEqual(channel.code, HTTPStatus.TOO_MANY_REQUESTS, channel.json_body) + + @override_config({"rc_joins_per_room": {"per_second": 0, "burst_count": 3}}) + def test_send_join_contributes_to_room_join_rate_limit_and_is_limited(self) -> None: + # Make two make_join requests up front. (These are rate limited, but do not + # contribute to the rate limit.) + join_event_dicts = [] + for i in range(2): + joining_user = f"@misspiggy{i}:{self.OTHER_SERVER_NAME}" + join_result = self._make_join(joining_user) + join_event_dict = join_result["event"] + self.add_hashes_and_signatures_from_other_server( + join_event_dict, + KNOWN_ROOM_VERSIONS[DEFAULT_ROOM_VERSION], + ) + join_event_dicts.append(join_event_dict) + + # In the test setup, two users join the room. Since the rate limiter burst + # count is 3, the first send_join should be accepted... + channel = self.make_signed_federation_request( + "PUT", + f"/_matrix/federation/v2/send_join/{self._room_id}/join0", + content=join_event_dicts[0], + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # ... but the second should be denied. 
+ channel = self.make_signed_federation_request( + "PUT", + f"/_matrix/federation/v2/send_join/{self._room_id}/join1", + content=join_event_dicts[1], + ) + self.assertEqual(channel.code, HTTPStatus.TOO_MANY_REQUESTS, channel.json_body) + + # NB: we could write a test which checks that the send_join event is seen + # by other workers over replication, and that they update their rate limit + # buckets accordingly. I'm going to assume that the join event gets sent over + # replication, at which point the tests.handlers.room_member test + # test_local_users_joining_on_another_worker_contribute_to_rate_limit + # is probably sufficient to reassure that the bucket is updated. + def _create_acl_event(content): return make_event_from_dict( diff --git a/tests/federation/transport/test_knocking.py b/tests/federation/transport/test_knocking.py index d21c11b716cd..0d048207b755 100644 --- a/tests/federation/transport/test_knocking.py +++ b/tests/federation/transport/test_knocking.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. from collections import OrderedDict +from http import HTTPStatus from typing import Dict, List from synapse.api.constants import EventTypes, JoinRules, Membership @@ -255,7 +256,7 @@ def test_room_state_returned_when_knocking(self): RoomVersions.V7.identifier, ), ) - self.assertEqual(200, channel.code, channel.result) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) # Note: We don't expect the knock membership event to be sent over federation as # part of the stripped room state, as the knocking homeserver already has that @@ -293,7 +294,7 @@ def test_room_state_returned_when_knocking(self): % (room_id, signed_knock_event.event_id), signed_knock_event_json, ) - self.assertEqual(200, channel.code, channel.result) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) # Check that we got the stripped room state in return room_state_events = channel.json_body["knock_state_events"] diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index d96d5aa1385e..b17af2725b3d 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -50,7 +50,7 @@ def setUp(self): self.mock_scheduler = Mock() hs = Mock() hs.get_datastores.return_value = Mock(main=self.mock_store) - self.mock_store.get_received_ts.return_value = make_awaitable(0) + self.mock_store.get_appservice_last_pos.return_value = make_awaitable(None) self.mock_store.set_appservice_last_pos.return_value = make_awaitable(None) self.mock_store.set_appservice_stream_type_pos.return_value = make_awaitable( None @@ -76,9 +76,9 @@ def test_notify_interested_services(self): event = Mock( sender="@someone:anywhere", type="m.room.message", room_id="!foo:bar" ) - self.mock_store.get_new_events_for_appservice.side_effect = [ - make_awaitable((0, [])), - make_awaitable((1, [event])), + self.mock_store.get_all_new_events_stream.side_effect = [ + make_awaitable((0, [], {})), + make_awaitable((1, [event], {event.event_id: 0})), ] self.handler.notify_interested_services(RoomStreamToken(None, 1)) @@ -95,8 +95,8 @@ def test_query_user_exists_unknown_user(self): event = Mock(sender=user_id, type="m.room.message", room_id="!foo:bar") self.mock_as_api.query_user.return_value = make_awaitable(True) - self.mock_store.get_new_events_for_appservice.side_effect = [ - make_awaitable((0, [event])), + self.mock_store.get_all_new_events_stream.side_effect = [ + make_awaitable((0, [event], {event.event_id: 0})), 
] self.handler.notify_interested_services(RoomStreamToken(None, 0)) @@ -112,8 +112,8 @@ def test_query_user_exists_known_user(self): event = Mock(sender=user_id, type="m.room.message", room_id="!foo:bar") self.mock_as_api.query_user.return_value = make_awaitable(True) - self.mock_store.get_new_events_for_appservice.side_effect = [ - make_awaitable((0, [event])), + self.mock_store.get_all_new_events_stream.side_effect = [ + make_awaitable((0, [event], {event.event_id: 0})), ] self.handler.notify_interested_services(RoomStreamToken(None, 0)) diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index 9b9c11fab73d..8a0bb91f40ce 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import List, cast +from typing import cast from unittest import TestCase from twisted.test.proto_helpers import MemoryReactor @@ -50,8 +50,6 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: hs = self.setup_test_homeserver(federation_http_client=None) self.handler = hs.get_federation_handler() self.store = hs.get_datastores().main - self.state_storage_controller = hs.get_storage_controllers().state - self._event_auth_handler = hs.get_event_auth_handler() return hs def test_exchange_revoked_invite(self) -> None: @@ -256,7 +254,7 @@ def test_backfill_with_many_backward_extremities(self) -> None: ] for _ in range(0, 8): event = make_event_from_dict( - self.add_hashes_and_signatures( + self.add_hashes_and_signatures_from_other_server( { "origin_server_ts": 1, "type": "m.room.message", @@ -314,142 +312,6 @@ def test_backfill_with_many_backward_extremities(self) -> None: ) self.get_success(d) - def test_backfill_floating_outlier_membership_auth(self) -> None: - """ - As the local homeserver, check that we can properly process a federated - event from the OTHER_SERVER with auth_events that include a floating - membership event from the OTHER_SERVER. - - Regression test, see #10439. 
- """ - OTHER_SERVER = "otherserver" - OTHER_USER = "@otheruser:" + OTHER_SERVER - - # create the room - user_id = self.register_user("kermit", "test") - tok = self.login("kermit", "test") - room_id = self.helper.create_room_as( - room_creator=user_id, - is_public=True, - tok=tok, - extra_content={ - "preset": "public_chat", - }, - ) - room_version = self.get_success(self.store.get_room_version(room_id)) - - prev_event_ids = self.get_success(self.store.get_prev_events_for_room(room_id)) - ( - most_recent_prev_event_id, - most_recent_prev_event_depth, - ) = self.get_success(self.store.get_max_depth_of(prev_event_ids)) - # mapping from (type, state_key) -> state_event_id - assert most_recent_prev_event_id is not None - prev_state_map = self.get_success( - self.state_storage_controller.get_state_ids_for_event( - most_recent_prev_event_id - ) - ) - # List of state event ID's - prev_state_ids = list(prev_state_map.values()) - auth_event_ids = prev_state_ids - auth_events = list( - self.get_success(self.store.get_events(auth_event_ids)).values() - ) - - # build a floating outlier member state event - fake_prev_event_id = "$" + random_string(43) - member_event_dict = { - "type": EventTypes.Member, - "content": { - "membership": "join", - }, - "state_key": OTHER_USER, - "room_id": room_id, - "sender": OTHER_USER, - "depth": most_recent_prev_event_depth, - "prev_events": [fake_prev_event_id], - "origin_server_ts": self.clock.time_msec(), - "signatures": {OTHER_SERVER: {"ed25519:key_version": "SomeSignatureHere"}}, - } - builder = self.hs.get_event_builder_factory().for_room_version( - room_version, member_event_dict - ) - member_event = self.get_success( - builder.build( - prev_event_ids=member_event_dict["prev_events"], - auth_event_ids=self._event_auth_handler.compute_auth_events( - builder, - prev_state_map, - for_verification=False, - ), - depth=member_event_dict["depth"], - ) - ) - # Override the signature added from "test" homeserver that we created the event with - member_event.signatures = member_event_dict["signatures"] - - # Add the new member_event to the StateMap - updated_state_map = dict(prev_state_map) - updated_state_map[ - (member_event.type, member_event.state_key) - ] = member_event.event_id - auth_events.append(member_event) - - # build and send an event authed based on the member event - message_event_dict = { - "type": EventTypes.Message, - "content": {}, - "room_id": room_id, - "sender": OTHER_USER, - "depth": most_recent_prev_event_depth, - "prev_events": prev_event_ids.copy(), - "origin_server_ts": self.clock.time_msec(), - "signatures": {OTHER_SERVER: {"ed25519:key_version": "SomeSignatureHere"}}, - } - builder = self.hs.get_event_builder_factory().for_room_version( - room_version, message_event_dict - ) - message_event = self.get_success( - builder.build( - prev_event_ids=message_event_dict["prev_events"], - auth_event_ids=self._event_auth_handler.compute_auth_events( - builder, - updated_state_map, - for_verification=False, - ), - depth=message_event_dict["depth"], - ) - ) - # Override the signature added from "test" homeserver that we created the event with - message_event.signatures = message_event_dict["signatures"] - - # Stub the /event_auth response from the OTHER_SERVER - async def get_event_auth( - destination: str, room_id: str, event_id: str - ) -> List[EventBase]: - return [ - event_from_pdu_json(ae.get_pdu_json(), room_version=room_version) - for ae in auth_events - ] - - self.handler.federation_client.get_event_auth = get_event_auth # type: ignore[assignment] 
- - with LoggingContext("receive_pdu"): - # Fake the OTHER_SERVER federating the message event over to our local homeserver - d = run_in_background( - self.hs.get_federation_event_handler().on_receive_pdu, - OTHER_SERVER, - message_event, - ) - self.get_success(d) - - # Now try and get the events on our local homeserver - stored_event = self.get_success( - self.store.get_event(message_event.event_id, allow_none=True) - ) - self.assertTrue(stored_event is not None) - @unittest.override_config( {"rc_invites": {"per_user": {"per_second": 0.5, "burst_count": 3}}} ) diff --git a/tests/handlers/test_federation_event.py b/tests/handlers/test_federation_event.py index 4b1a8f04dbde..51c8dd649822 100644 --- a/tests/handlers/test_federation_event.py +++ b/tests/handlers/test_federation_event.py @@ -104,7 +104,7 @@ def _test_process_pulled_event_with_missing_state( # mock up a load of state events which we are missing state_events = [ make_event_from_dict( - self.add_hashes_and_signatures( + self.add_hashes_and_signatures_from_other_server( { "type": "test_state_type", "state_key": f"state_{i}", @@ -131,7 +131,7 @@ def _test_process_pulled_event_with_missing_state( # Depending on the test, we either persist this upfront (as an outlier), # or let the server request it. prev_event = make_event_from_dict( - self.add_hashes_and_signatures( + self.add_hashes_and_signatures_from_other_server( { "type": "test_regular_type", "room_id": room_id, @@ -165,7 +165,7 @@ async def get_event(destination: str, event_id: str, timeout=None): # mock up a regular event to pass into _process_pulled_event pulled_event = make_event_from_dict( - self.add_hashes_and_signatures( + self.add_hashes_and_signatures_from_other_server( { "type": "test_regular_type", "room_id": room_id, diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py index 82b3bb3b735d..4c62449c898e 100644 --- a/tests/handlers/test_password_providers.py +++ b/tests/handlers/test_password_providers.py @@ -14,6 +14,7 @@ """Tests for the password_auth_provider interface""" +from http import HTTPStatus from typing import Any, Type, Union from unittest.mock import Mock @@ -188,14 +189,14 @@ def password_only_auth_provider_login_test_body(self): # check_password must return an awaitable mock_password_provider.check_password.return_value = make_awaitable(True) channel = self._send_password_login("u", "p") - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self.assertEqual("@u:test", channel.json_body["user_id"]) mock_password_provider.check_password.assert_called_once_with("@u:test", "p") mock_password_provider.reset_mock() # login with mxid should work too channel = self._send_password_login("@u:bz", "p") - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self.assertEqual("@u:bz", channel.json_body["user_id"]) mock_password_provider.check_password.assert_called_once_with("@u:bz", "p") mock_password_provider.reset_mock() @@ -204,7 +205,7 @@ def password_only_auth_provider_login_test_body(self): # in these cases, but at least we can guard against the API changing # unexpectedly channel = self._send_password_login(" USER🙂NAME ", " pASS\U0001F622word ") - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self.assertEqual("@ USER🙂NAME :test", channel.json_body["user_id"]) 
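A side note on the status-code assertion changes throughout these test files: HTTPStatus is an IntEnum, so swapping the bare integers for named constants does not change what is asserted, it only makes the expected status readable. A quick illustration, not part of the patch:

    from http import HTTPStatus

    assert HTTPStatus.OK == 200
    assert HTTPStatus.FORBIDDEN == 403
    assert HTTPStatus.BAD_REQUEST == 400
    assert HTTPStatus.TOO_MANY_REQUESTS == 429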
mock_password_provider.check_password.assert_called_once_with( "@ USER🙂NAME :test", " pASS😢word " @@ -258,10 +259,10 @@ def local_user_fallback_login_test_body(self): # check_password must return an awaitable mock_password_provider.check_password.return_value = make_awaitable(False) channel = self._send_password_login("u", "p") - self.assertEqual(channel.code, 403, channel.result) + self.assertEqual(channel.code, HTTPStatus.FORBIDDEN, channel.result) channel = self._send_password_login("localuser", "localpass") - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self.assertEqual("@localuser:test", channel.json_body["user_id"]) @override_config(legacy_providers_config(LegacyPasswordOnlyAuthProvider)) @@ -382,7 +383,7 @@ def password_auth_disabled_test_body(self): # login shouldn't work and should be rejected with a 400 ("unknown login type") channel = self._send_password_login("u", "p") - self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) mock_password_provider.check_password.assert_not_called() @override_config(legacy_providers_config(LegacyCustomAuthProvider)) @@ -406,14 +407,14 @@ def custom_auth_provider_login_test_body(self): # login with missing param should be rejected channel = self._send_login("test.login_type", "u") - self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) mock_password_provider.check_auth.assert_not_called() mock_password_provider.check_auth.return_value = make_awaitable( ("@user:bz", None) ) channel = self._send_login("test.login_type", "u", test_field="y") - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self.assertEqual("@user:bz", channel.json_body["user_id"]) mock_password_provider.check_auth.assert_called_once_with( "u", "test.login_type", {"test_field": "y"} @@ -427,7 +428,7 @@ def custom_auth_provider_login_test_body(self): ("@ MALFORMED! :bz", None) ) channel = self._send_login("test.login_type", " USER🙂NAME ", test_field=" abc ") - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self.assertEqual("@ MALFORMED! 
:bz", channel.json_body["user_id"]) mock_password_provider.check_auth.assert_called_once_with( " USER🙂NAME ", "test.login_type", {"test_field": " abc "} @@ -510,7 +511,7 @@ def custom_auth_provider_callback_test_body(self): ("@user:bz", callback) ) channel = self._send_login("test.login_type", "u", test_field="y") - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self.assertEqual("@user:bz", channel.json_body["user_id"]) mock_password_provider.check_auth.assert_called_once_with( "u", "test.login_type", {"test_field": "y"} @@ -549,7 +550,7 @@ def custom_auth_password_disabled_test_body(self): # login shouldn't work and should be rejected with a 400 ("unknown login type") channel = self._send_password_login("localuser", "localpass") - self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) mock_password_provider.check_auth.assert_not_called() @override_config( @@ -584,7 +585,7 @@ def custom_auth_password_disabled_localdb_enabled_test_body(self): # login shouldn't work and should be rejected with a 400 ("unknown login type") channel = self._send_password_login("localuser", "localpass") - self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) mock_password_provider.check_auth.assert_not_called() @override_config( @@ -615,7 +616,7 @@ def password_custom_auth_password_disabled_login_test_body(self): # login shouldn't work and should be rejected with a 400 ("unknown login type") channel = self._send_password_login("localuser", "localpass") - self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) mock_password_provider.check_auth.assert_not_called() mock_password_provider.check_password.assert_not_called() @@ -646,13 +647,13 @@ def password_custom_auth_password_disabled_ui_auth_test_body(self): ("@localuser:test", None) ) channel = self._send_login("test.login_type", "localuser", test_field="") - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) tok1 = channel.json_body["access_token"] channel = self._send_login( "test.login_type", "localuser", test_field="", device_id="dev2" ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) # make the initial request which returns a 401 channel = self._delete_device(tok1, "dev2") @@ -721,7 +722,7 @@ def custom_auth_no_local_user_fallback_test_body(self): # password login shouldn't work and should be rejected with a 400 # ("unknown login type") channel = self._send_password_login("localuser", "localpass") - self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) def test_on_logged_out(self): """Tests that the on_logged_out callback is called when the user logs out.""" @@ -884,7 +885,7 @@ def _test_3pid_allowed(self, username: str, registration: bool): }, access_token=tok, ) - self.assertEqual(channel.code, 403, channel.result) + self.assertEqual(channel.code, HTTPStatus.FORBIDDEN, channel.result) self.assertEqual( channel.json_body["errcode"], Codes.THREEPID_DENIED, @@ -906,7 +907,7 @@ def _test_3pid_allowed(self, username: str, registration: bool): }, access_token=tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) 
self.assertIn("sid", channel.json_body) m.assert_called_once_with("email", "bar@test.com", registration) @@ -949,12 +950,12 @@ def _do_uia_assert_mock_not_called(self, username: str, m: Mock) -> JsonDict: "register", {"auth": {"session": session, "type": LoginType.DUMMY}}, ) - self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) return channel.json_body def _get_login_flows(self) -> JsonDict: channel = self.make_request("GET", "/_matrix/client/r0/login") - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) return channel.json_body["flows"] def _send_password_login(self, user: str, password: str) -> FakeChannel: diff --git a/tests/handlers/test_room_member.py b/tests/handlers/test_room_member.py new file mode 100644 index 000000000000..254e7e4b80ac --- /dev/null +++ b/tests/handlers/test_room_member.py @@ -0,0 +1,290 @@ +from http import HTTPStatus +from unittest.mock import Mock, patch + +from twisted.test.proto_helpers import MemoryReactor + +import synapse.rest.admin +import synapse.rest.client.login +import synapse.rest.client.room +from synapse.api.constants import EventTypes, Membership +from synapse.api.errors import LimitExceededError +from synapse.crypto.event_signing import add_hashes_and_signatures +from synapse.events import FrozenEventV3 +from synapse.federation.federation_client import SendJoinResult +from synapse.server import HomeServer +from synapse.types import UserID, create_requester +from synapse.util import Clock + +from tests.replication._base import RedisMultiWorkerStreamTestCase +from tests.server import make_request +from tests.test_utils import make_awaitable +from tests.unittest import FederatingHomeserverTestCase, override_config + + +class TestJoinsLimitedByPerRoomRateLimiter(FederatingHomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + synapse.rest.client.login.register_servlets, + synapse.rest.client.room.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.handler = hs.get_room_member_handler() + + # Create three users. + self.alice = self.register_user("alice", "pass") + self.alice_token = self.login("alice", "pass") + self.bob = self.register_user("bob", "pass") + self.bob_token = self.login("bob", "pass") + self.chris = self.register_user("chris", "pass") + self.chris_token = self.login("chris", "pass") + + # Create a room on this homeserver. Note that this counts as a join: it + # contributes to the rate limter's count of actions + self.room_id = self.helper.create_room_as(self.alice, tok=self.alice_token) + + self.intially_unjoined_room_id = f"!example:{self.OTHER_SERVER_NAME}" + + @override_config({"rc_joins_per_room": {"per_second": 0, "burst_count": 2}}) + def test_local_user_local_joins_contribute_to_limit_and_are_limited(self) -> None: + # The rate limiter has accumulated one token from Alice's join after the create + # event. + # Try joining the room as Bob. + self.get_success( + self.handler.update_membership( + requester=create_requester(self.bob), + target=UserID.from_string(self.bob), + room_id=self.room_id, + action=Membership.JOIN, + ) + ) + + # The rate limiter bucket is full. A second join should be denied. 
+ self.get_failure( + self.handler.update_membership( + requester=create_requester(self.chris), + target=UserID.from_string(self.chris), + room_id=self.room_id, + action=Membership.JOIN, + ), + LimitExceededError, + ) + + @override_config({"rc_joins_per_room": {"per_second": 0, "burst_count": 2}}) + def test_local_user_profile_edits_dont_contribute_to_limit(self) -> None: + # The rate limiter has accumulated one token from Alice's join after the create + # event. Alice should still be able to change her displayname. + self.get_success( + self.handler.update_membership( + requester=create_requester(self.alice), + target=UserID.from_string(self.alice), + room_id=self.room_id, + action=Membership.JOIN, + content={"displayname": "Alice Cooper"}, + ) + ) + + # Still room in the limiter bucket. Chris's join should be accepted. + self.get_success( + self.handler.update_membership( + requester=create_requester(self.chris), + target=UserID.from_string(self.chris), + room_id=self.room_id, + action=Membership.JOIN, + ) + ) + + @override_config({"rc_joins_per_room": {"per_second": 0, "burst_count": 1}}) + def test_remote_joins_contribute_to_rate_limit(self) -> None: + # Join once, to fill the rate limiter bucket. + # + # To do this we have to mock the responses from the remote homeserver. + # We also patch out a bunch of event checks on our end. All we're really + # trying to check here is that remote joins will bump the rate limter when + # they are persisted. + create_event_source = { + "auth_events": [], + "content": { + "creator": f"@creator:{self.OTHER_SERVER_NAME}", + "room_version": self.hs.config.server.default_room_version.identifier, + }, + "depth": 0, + "origin_server_ts": 0, + "prev_events": [], + "room_id": self.intially_unjoined_room_id, + "sender": f"@creator:{self.OTHER_SERVER_NAME}", + "state_key": "", + "type": EventTypes.Create, + } + self.add_hashes_and_signatures_from_other_server( + create_event_source, + self.hs.config.server.default_room_version, + ) + create_event = FrozenEventV3( + create_event_source, + self.hs.config.server.default_room_version, + {}, + None, + ) + + join_event_source = { + "auth_events": [create_event.event_id], + "content": {"membership": "join"}, + "depth": 1, + "origin_server_ts": 100, + "prev_events": [create_event.event_id], + "sender": self.bob, + "state_key": self.bob, + "room_id": self.intially_unjoined_room_id, + "type": EventTypes.Member, + } + add_hashes_and_signatures( + self.hs.config.server.default_room_version, + join_event_source, + self.hs.hostname, + self.hs.signing_key, + ) + join_event = FrozenEventV3( + join_event_source, + self.hs.config.server.default_room_version, + {}, + None, + ) + + mock_make_membership_event = Mock( + return_value=make_awaitable( + ( + self.OTHER_SERVER_NAME, + join_event, + self.hs.config.server.default_room_version, + ) + ) + ) + mock_send_join = Mock( + return_value=make_awaitable( + SendJoinResult( + join_event, + self.OTHER_SERVER_NAME, + state=[create_event], + auth_chain=[create_event], + partial_state=False, + servers_in_room=[], + ) + ) + ) + + with patch.object( + self.handler.federation_handler.federation_client, + "make_membership_event", + mock_make_membership_event, + ), patch.object( + self.handler.federation_handler.federation_client, + "send_join", + mock_send_join, + ), patch( + "synapse.event_auth._is_membership_change_allowed", + return_value=None, + ), patch( + "synapse.handlers.federation_event.check_state_dependent_auth_rules", + return_value=None, + ): + self.get_success( + 
self.handler.update_membership( + requester=create_requester(self.bob), + target=UserID.from_string(self.bob), + room_id=self.intially_unjoined_room_id, + action=Membership.JOIN, + remote_room_hosts=[self.OTHER_SERVER_NAME], + ) + ) + + # Try to join as Chris. Should get denied. + self.get_failure( + self.handler.update_membership( + requester=create_requester(self.chris), + target=UserID.from_string(self.chris), + room_id=self.intially_unjoined_room_id, + action=Membership.JOIN, + remote_room_hosts=[self.OTHER_SERVER_NAME], + ), + LimitExceededError, + ) + + # TODO: test that remote joins to a room are rate limited. + # Could do this by setting the burst count to 1, then: + # - remote-joining a room + # - immediately leaving + # - trying to remote-join again. + + +class TestReplicatedJoinsLimitedByPerRoomRateLimiter(RedisMultiWorkerStreamTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + synapse.rest.client.login.register_servlets, + synapse.rest.client.room.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.handler = hs.get_room_member_handler() + + # Create three users. + self.alice = self.register_user("alice", "pass") + self.alice_token = self.login("alice", "pass") + self.bob = self.register_user("bob", "pass") + self.bob_token = self.login("bob", "pass") + self.chris = self.register_user("chris", "pass") + self.chris_token = self.login("chris", "pass") + + # Create a room on this homeserver. + # Note that this counts as a + self.room_id = self.helper.create_room_as(self.alice, tok=self.alice_token) + self.intially_unjoined_room_id = "!example:otherhs" + + @override_config({"rc_joins_per_room": {"per_second": 0, "burst_count": 2}}) + def test_local_users_joining_on_another_worker_contribute_to_rate_limit( + self, + ) -> None: + # The rate limiter has accumulated one token from Alice's join after the create + # event. + self.replicate() + + # Spawn another worker and have bob join via it. + worker_app = self.make_worker_hs( + "synapse.app.generic_worker", extra_config={"worker_name": "other worker"} + ) + worker_site = self._hs_to_site[worker_app] + channel = make_request( + self.reactor, + worker_site, + "POST", + f"/_matrix/client/v3/rooms/{self.room_id}/join", + access_token=self.bob_token, + ) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) + + # wait for join to arrive over replication + self.replicate() + + # Try to join as Chris on the worker. Should get denied because Alice + # and Bob have both joined the room. + self.get_failure( + worker_app.get_room_member_handler().update_membership( + requester=create_requester(self.chris), + target=UserID.from_string(self.chris), + room_id=self.room_id, + action=Membership.JOIN, + ), + LimitExceededError, + ) + + # Try to join as Chris on the original worker. Should get denied because Alice + # and Bob have both joined the room. 
+ self.get_failure( + self.handler.update_membership( + requester=create_requester(self.chris), + target=UserID.from_string(self.chris), + room_id=self.room_id, + action=Membership.JOIN, + ), + LimitExceededError, + ) diff --git a/tests/handlers/test_send_email.py b/tests/handlers/test_send_email.py index 6f77b1237c97..da4bf8b5829a 100644 --- a/tests/handlers/test_send_email.py +++ b/tests/handlers/test_send_email.py @@ -23,7 +23,7 @@ from twisted.mail import interfaces, smtp from tests.server import FakeTransport -from tests.unittest import HomeserverTestCase +from tests.unittest import HomeserverTestCase, override_config @implementer(interfaces.IMessageDelivery) @@ -110,3 +110,58 @@ def test_send_email(self): user, msg = message_delivery.messages.pop() self.assertEqual(str(user), "foo@bar.com") self.assertIn(b"Subject: test subject", msg) + + @override_config( + { + "email": { + "notif_from": "noreply@test", + "force_tls": True, + }, + } + ) + def test_send_email_force_tls(self): + """Happy-path test that we can send email to an Implicit TLS server.""" + h = self.hs.get_send_email_handler() + d = ensureDeferred( + h.send_email( + "foo@bar.com", "test subject", "Tests", "HTML content", "Text content" + ) + ) + # there should be an attempt to connect to localhost:465 + self.assertEqual(len(self.reactor.sslClients), 1) + ( + host, + port, + client_factory, + contextFactory, + _timeout, + _bindAddress, + ) = self.reactor.sslClients[0] + self.assertEqual(host, "localhost") + self.assertEqual(port, 465) + + # wire it up to an SMTP server + message_delivery = _DummyMessageDelivery() + server_protocol = smtp.ESMTP() + server_protocol.delivery = message_delivery + # make sure that the server uses the test reactor to set timeouts + server_protocol.callLater = self.reactor.callLater # type: ignore[assignment] + + client_protocol = client_factory.buildProtocol(None) + client_protocol.makeConnection(FakeTransport(server_protocol, self.reactor)) + server_protocol.makeConnection( + FakeTransport( + client_protocol, + self.reactor, + peer_address=IPv4Address("TCP", "127.0.0.1", 1234), + ) + ) + + # the message should now get delivered + self.get_success(d, by=0.1) + + # check it arrived + self.assertEqual(len(message_delivery.messages), 1) + user, msg = message_delivery.messages.pop() + self.assertEqual(str(user), "foo@bar.com") + self.assertIn(b"Subject: test subject", msg) diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py index ecc7cc6461ee..e3f38fbcc5ce 100644 --- a/tests/handlers/test_sync.py +++ b/tests/handlers/test_sync.py @@ -159,7 +159,7 @@ def test_unknown_room_version(self): # Blow away caches (supported room versions can only change due to a restart). self.store.get_rooms_for_user_with_stream_ordering.invalidate_all() - self.store._get_event_cache.clear() + self.get_success(self.store._get_event_cache.clear()) self.store._event_ref.clear() # The rooms should be excluded from the sync response. diff --git a/tests/logging/test_opentracing.py b/tests/logging/test_opentracing.py index 40148d503cfb..3b14c76d7e65 100644 --- a/tests/logging/test_opentracing.py +++ b/tests/logging/test_opentracing.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import cast + from twisted.internet import defer from twisted.test.proto_helpers import MemoryReactorClock @@ -40,6 +42,15 @@ class LogContextScopeManagerTestCase(TestCase): + """ + Test logging contexts and active opentracing spans. 
+ + There's casts throughout this from generic opentracing objects (e.g. + opentracing.Span) to the ones specific to Jaeger since they have additional + properties that these tests depend on. This is safe since the only supported + opentracing backend is Jaeger. + """ + if LogContextScopeManager is None: skip = "Requires opentracing" # type: ignore[unreachable] if jaeger_client is None: @@ -69,7 +80,7 @@ def test_start_active_span(self) -> None: # start_active_span should start and activate a span. scope = start_active_span("span", tracer=self._tracer) - span = scope.span + span = cast(jaeger_client.Span, scope.span) self.assertEqual(self._tracer.active_span, span) self.assertIsNotNone(span.start_time) @@ -91,6 +102,7 @@ def test_nested_spans(self) -> None: with LoggingContext("root context"): with start_active_span("root span", tracer=self._tracer) as root_scope: self.assertEqual(self._tracer.active_span, root_scope.span) + root_context = cast(jaeger_client.SpanContext, root_scope.span.context) scope1 = start_active_span( "child1", @@ -99,9 +111,8 @@ def test_nested_spans(self) -> None: self.assertEqual( self._tracer.active_span, scope1.span, "child1 was not activated" ) - self.assertEqual( - scope1.span.context.parent_id, root_scope.span.context.span_id - ) + context1 = cast(jaeger_client.SpanContext, scope1.span.context) + self.assertEqual(context1.parent_id, root_context.span_id) scope2 = start_active_span_follows_from( "child2", @@ -109,17 +120,18 @@ def test_nested_spans(self) -> None: tracer=self._tracer, ) self.assertEqual(self._tracer.active_span, scope2.span) - self.assertEqual( - scope2.span.context.parent_id, scope1.span.context.span_id - ) + context2 = cast(jaeger_client.SpanContext, scope2.span.context) + self.assertEqual(context2.parent_id, context1.span_id) with scope1, scope2: pass # the root scope should be restored self.assertEqual(self._tracer.active_span, root_scope.span) - self.assertIsNotNone(scope2.span.end_time) - self.assertIsNotNone(scope1.span.end_time) + span2 = cast(jaeger_client.Span, scope2.span) + span1 = cast(jaeger_client.Span, scope1.span) + self.assertIsNotNone(span2.end_time) + self.assertIsNotNone(span1.end_time) self.assertIsNone(self._tracer.active_span) diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index 9b623d0033cd..718f48957748 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -16,13 +16,23 @@ import frozendict +from twisted.test.proto_helpers import MemoryReactor + +import synapse.rest.admin +from synapse.api.constants import EventTypes, Membership from synapse.api.room_versions import RoomVersions +from synapse.appservice import ApplicationService from synapse.events import FrozenEvent from synapse.push import push_rule_evaluator from synapse.push.push_rule_evaluator import PushRuleEvaluatorForEvent +from synapse.rest.client import login, register, room +from synapse.server import HomeServer +from synapse.storage.databases.main.appservice import _make_exclusive_regex from synapse.types import JsonDict +from synapse.util import Clock from tests import unittest +from tests.test_utils.event_injection import create_event, inject_member_event class PushRuleEvaluatorTestCase(unittest.TestCase): @@ -354,3 +364,78 @@ def test_relation_match(self) -> None: "event_type": "*.reaction", } self.assertTrue(evaluator.matches(condition, "@user:test", "foo")) + + +class TestBulkPushRuleEvaluator(unittest.HomeserverTestCase): + """Tests for the bulk push rule 
evaluator""" + + servlets = [ + synapse.rest.admin.register_servlets_for_client_rest_resource, + login.register_servlets, + register.register_servlets, + room.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer): + # Define an application service so that we can register appservice users + self._service_token = "some_token" + self._service = ApplicationService( + self._service_token, + "as1", + "@as.sender:test", + namespaces={ + "users": [ + {"regex": "@_as_.*:test", "exclusive": True}, + {"regex": "@as.sender:test", "exclusive": True}, + ] + }, + msc3202_transaction_extensions=True, + ) + self.hs.get_datastores().main.services_cache = [self._service] + self.hs.get_datastores().main.exclusive_user_regex = _make_exclusive_regex( + [self._service] + ) + + self._as_user, _ = self.register_appservice_user( + "_as_user", self._service_token + ) + + self.evaluator = self.hs.get_bulk_push_rule_evaluator() + + def test_ignore_appservice_users(self) -> None: + "Test that we don't generate push for appservice users" + + user_id = self.register_user("user", "pass") + token = self.login("user", "pass") + + room_id = self.helper.create_room_as(user_id, tok=token) + self.get_success( + inject_member_event(self.hs, room_id, self._as_user, Membership.JOIN) + ) + + event, context = self.get_success( + create_event( + self.hs, + type=EventTypes.Message, + room_id=room_id, + sender=user_id, + content={"body": "test", "msgtype": "m.text"}, + ) + ) + + # Assert the returned push rules do not contain the app service user + rules = self.get_success(self.evaluator._get_rules_for_event(event)) + self.assertTrue(self._as_user not in rules) + + # Assert that no push actions have been added to the staging table (the + # sender should not be pushed for the event) + users_with_push_actions = self.get_success( + self.hs.get_datastores().main.db_pool.simple_select_onecol( + table="event_push_actions_staging", + keyvalues={"event_id": event.event_id}, + retcol="user_id", + desc="test_ignore_appservice_users", + ) + ) + + self.assertEqual(len(users_with_push_actions), 0) diff --git a/tests/replication/slave/storage/test_account_data.py b/tests/replication/slave/storage/test_account_data.py deleted file mode 100644 index 1524087c43d4..000000000000 --- a/tests/replication/slave/storage/test_account_data.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from synapse.replication.slave.storage.account_data import SlavedAccountDataStore - -from ._base import BaseSlavedStoreTestCase - -USER_ID = "@feeling:blue" -TYPE = "my.type" - - -class SlavedAccountDataStoreTestCase(BaseSlavedStoreTestCase): - - STORE_TYPE = SlavedAccountDataStore - - def test_user_account_data(self): - self.get_success( - self.master_store.add_account_data_for_user(USER_ID, TYPE, {"a": 1}) - ) - self.replicate() - self.check( - "get_global_account_data_by_type_for_user", [USER_ID, TYPE], {"a": 1} - ) - - self.get_success( - self.master_store.add_account_data_for_user(USER_ID, TYPE, {"a": 2}) - ) - self.replicate() - self.check( - "get_global_account_data_by_type_for_user", [USER_ID, TYPE], {"a": 2} - ) diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 230dc76f723c..2526136ff813 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -21,7 +21,7 @@ from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin -from synapse.api.constants import EventTypes, Membership +from synapse.api.constants import EventTypes, Membership, RoomTypes from synapse.api.errors import Codes from synapse.handlers.pagination import PaginationHandler from synapse.rest.client import directory, events, login, room @@ -1130,6 +1130,8 @@ def test_list_rooms(self) -> None: self.assertIn("guest_access", r) self.assertIn("history_visibility", r) self.assertIn("state_events", r) + self.assertIn("room_type", r) + self.assertIsNone(r["room_type"]) # Check that the correct number of total rooms was returned self.assertEqual(channel.json_body["total_rooms"], total_rooms) @@ -1229,7 +1231,11 @@ def test_list_rooms_pagination(self) -> None: def test_correct_room_attributes(self) -> None: """Test the correct attributes for a room are returned""" # Create a test room - room_id = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok) + room_id = self.helper.create_room_as( + self.admin_user, + tok=self.admin_user_tok, + extra_content={"creation_content": {"type": RoomTypes.SPACE}}, + ) test_alias = "#test:test" test_room_name = "something" @@ -1306,6 +1312,7 @@ def test_correct_room_attributes(self) -> None: self.assertEqual(room_id, r["room_id"]) self.assertEqual(test_room_name, r["name"]) self.assertEqual(test_alias, r["canonical_alias"]) + self.assertEqual(RoomTypes.SPACE, r["room_type"]) def test_room_list_sort_order(self) -> None: """Test room list sort ordering. 
alphabetical name versus number of members, @@ -1630,7 +1637,7 @@ def test_single_room(self) -> None: self.assertIn("guest_access", channel.json_body) self.assertIn("history_visibility", channel.json_body) self.assertIn("state_events", channel.json_body) - + self.assertIn("room_type", channel.json_body) self.assertEqual(room_id_1, channel.json_body["room_id"]) def test_single_room_devices(self) -> None: diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index e32aaadb9858..12db68d564d7 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -1379,7 +1379,7 @@ def test_create_server_admin(self) -> None: content=body, ) - self.assertEqual(201, channel.code, msg=channel.json_body) + self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertEqual("Bob's name", channel.json_body["displayname"]) self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) @@ -1434,7 +1434,7 @@ def test_create_user(self) -> None: content=body, ) - self.assertEqual(201, channel.code, msg=channel.json_body) + self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertEqual("Bob's name", channel.json_body["displayname"]) self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) @@ -1512,7 +1512,7 @@ def test_create_user_mau_limit_reached_active_admin(self) -> None: content={"password": "abc123", "admin": False}, ) - self.assertEqual(201, channel.code, msg=channel.json_body) + self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertFalse(channel.json_body["admin"]) @@ -1550,7 +1550,7 @@ def test_create_user_mau_limit_reached_passive_admin(self) -> None: ) # Admin user is not blocked by mau anymore - self.assertEqual(201, channel.code, msg=channel.json_body) + self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertFalse(channel.json_body["admin"]) @@ -1585,7 +1585,7 @@ def test_create_user_email_notif_for_new_users(self) -> None: content=body, ) - self.assertEqual(201, channel.code, msg=channel.json_body) + self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"]) @@ -1626,7 +1626,7 @@ def test_create_user_email_no_notif_for_new_users(self) -> None: content=body, ) - self.assertEqual(201, channel.code, msg=channel.json_body) + self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) self.assertEqual("bob@bob.bob", channel.json_body["threepids"][0]["address"]) @@ -1636,6 +1636,41 @@ def test_create_user_email_no_notif_for_new_users(self) -> None: ) self.assertEqual(len(pushers), 0) + @override_config( + { + "email": { + "enable_notifs": True, + "notif_for_new_users": True, + "notif_from": "test@example.com", + }, + "public_baseurl": "https://example.com", + } + ) + def test_create_user_email_notif_for_new_users_with_msisdn_threepid(self) -> None: + """ + Check that a new regular user is created successfully when they have a msisdn + threepid and email 
notif_for_new_users is set to True. + """ + url = self.url_prefix % "@bob:test" + + # Create user + body = { + "password": "abc123", + "threepids": [{"medium": "msisdn", "address": "1234567890"}], + } + + channel = self.make_request( + "PUT", + url, + access_token=self.admin_user_tok, + content=body, + ) + + self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body) + self.assertEqual("@bob:test", channel.json_body["name"]) + self.assertEqual("msisdn", channel.json_body["threepids"][0]["medium"]) + self.assertEqual("1234567890", channel.json_body["threepids"][0]["address"]) + def test_set_password(self) -> None: """ Test setting a new password for another user. @@ -2372,7 +2407,7 @@ def test_accidental_deactivation_prevention(self) -> None: content={"password": "abc123"}, ) - self.assertEqual(201, channel.code, msg=channel.json_body) + self.assertEqual(HTTPStatus.CREATED, channel.code, msg=channel.json_body) self.assertEqual("@bob:test", channel.json_body["name"]) self.assertEqual("bob", channel.json_body["displayname"]) diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index 1f9b65351ed4..7ae926dc9c69 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py @@ -11,10 +11,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import json import os import re from email.parser import Parser +from http import HTTPStatus from typing import Any, Dict, List, Optional, Union from unittest.mock import Mock @@ -95,10 +95,8 @@ def attempt_wrong_password_login(self, username: str, password: str) -> None: """ body = {"type": "m.login.password", "user": username, "password": password} - channel = self.make_request( - "POST", "/_matrix/client/r0/login", json.dumps(body).encode("utf8") - ) - self.assertEqual(channel.code, 403, channel.result) + channel = self.make_request("POST", "/_matrix/client/r0/login", body) + self.assertEqual(channel.code, HTTPStatus.FORBIDDEN, channel.result) def test_basic_password_reset(self) -> None: """Test basic password reset flow""" @@ -347,7 +345,7 @@ def _validate_token(self, link: str) -> None: shorthand=False, ) - self.assertEqual(200, channel.code, channel.result) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) # Now POST to the same endpoint, mimicking the same behaviour as clicking the # password reset confirm button @@ -362,7 +360,7 @@ def _validate_token(self, link: str) -> None: shorthand=False, content_is_form=True, ) - self.assertEqual(200, channel.code, channel.result) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) def _get_link_from_email(self) -> str: assert self.email_attempts, "No emails have been sent" @@ -390,7 +388,7 @@ def _reset_password( new_password: str, session_id: str, client_secret: str, - expected_code: int = 200, + expected_code: int = HTTPStatus.OK, ) -> None: channel = self.make_request( "POST", @@ -479,16 +477,14 @@ def test_pending_invites(self) -> None: self.assertEqual(memberships[0].room_id, room_id, memberships) def deactivate(self, user_id: str, tok: str) -> None: - request_data = json.dumps( - { - "auth": { - "type": "m.login.password", - "user": user_id, - "password": "test", - }, - "erase": False, - } - ) + request_data = { + "auth": { + "type": "m.login.password", + "user": user_id, + "password": "test", + }, + "erase": False, + } channel = self.make_request( "POST", "account/deactivate", 
request_data, access_token=tok ) @@ -715,7 +711,9 @@ def test_add_email_if_disabled(self) -> None: }, access_token=self.user_id_tok, ) - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) # Get user @@ -725,7 +723,7 @@ def test_add_email_if_disabled(self) -> None: access_token=self.user_id_tok, ) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assertFalse(channel.json_body["threepids"]) def test_delete_email(self) -> None: @@ -747,7 +745,7 @@ def test_delete_email(self) -> None: {"medium": "email", "address": self.email}, access_token=self.user_id_tok, ) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) # Get user channel = self.make_request( @@ -756,7 +754,7 @@ def test_delete_email(self) -> None: access_token=self.user_id_tok, ) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assertFalse(channel.json_body["threepids"]) def test_delete_email_if_disabled(self) -> None: @@ -781,7 +779,9 @@ def test_delete_email_if_disabled(self) -> None: access_token=self.user_id_tok, ) - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) # Get user @@ -791,7 +791,7 @@ def test_delete_email_if_disabled(self) -> None: access_token=self.user_id_tok, ) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) self.assertEqual(self.email, channel.json_body["threepids"][0]["address"]) @@ -817,7 +817,9 @@ def test_cant_add_email_without_clicking_link(self) -> None: }, access_token=self.user_id_tok, ) - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) self.assertEqual(Codes.THREEPID_AUTH_FAILED, channel.json_body["errcode"]) # Get user @@ -827,7 +829,7 @@ def test_cant_add_email_without_clicking_link(self) -> None: access_token=self.user_id_tok, ) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assertFalse(channel.json_body["threepids"]) def test_no_valid_token(self) -> None: @@ -852,7 +854,9 @@ def test_no_valid_token(self) -> None: }, access_token=self.user_id_tok, ) - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) self.assertEqual(Codes.THREEPID_AUTH_FAILED, channel.json_body["errcode"]) # Get user @@ -862,7 +866,7 @@ def test_no_valid_token(self) -> None: access_token=self.user_id_tok, ) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assertFalse(channel.json_body["threepids"]) @override_config({"next_link_domain_whitelist": None}) @@ -872,7 +876,7 @@ def test_next_link(self) -> None: "something@example.com", "some_secret", 
next_link="https://example.com/a/good/site", - expect_code=200, + expect_code=HTTPStatus.OK, ) @override_config({"next_link_domain_whitelist": None}) @@ -884,7 +888,7 @@ def test_next_link_exotic_protocol(self) -> None: "something@example.com", "some_secret", next_link="some-protocol://abcdefghijklmopqrstuvwxyz", - expect_code=200, + expect_code=HTTPStatus.OK, ) @override_config({"next_link_domain_whitelist": None}) @@ -895,7 +899,7 @@ def test_next_link_file_uri(self) -> None: "something@example.com", "some_secret", next_link="file:///host/path", - expect_code=400, + expect_code=HTTPStatus.BAD_REQUEST, ) @override_config({"next_link_domain_whitelist": ["example.com", "example.org"]}) @@ -907,28 +911,28 @@ def test_next_link_domain_whitelist(self) -> None: "something@example.com", "some_secret", next_link=None, - expect_code=200, + expect_code=HTTPStatus.OK, ) self._request_token( "something@example.com", "some_secret", next_link="https://example.com/some/good/page", - expect_code=200, + expect_code=HTTPStatus.OK, ) self._request_token( "something@example.com", "some_secret", next_link="https://example.org/some/also/good/page", - expect_code=200, + expect_code=HTTPStatus.OK, ) self._request_token( "something@example.com", "some_secret", next_link="https://bad.example.org/some/bad/page", - expect_code=400, + expect_code=HTTPStatus.BAD_REQUEST, ) @override_config({"next_link_domain_whitelist": []}) @@ -940,7 +944,7 @@ def test_empty_next_link_domain_whitelist(self) -> None: "something@example.com", "some_secret", next_link="https://example.com/a/page", - expect_code=400, + expect_code=HTTPStatus.BAD_REQUEST, ) def _request_token( @@ -948,7 +952,7 @@ def _request_token( email: str, client_secret: str, next_link: Optional[str] = None, - expect_code: int = 200, + expect_code: int = HTTPStatus.OK, ) -> Optional[str]: """Request a validation token to add an email address to a user's account @@ -993,7 +997,9 @@ def _request_token_invalid_email( b"account/3pid/email/requestToken", {"client_secret": client_secret, "email": email, "send_attempt": 1}, ) - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) self.assertEqual(expected_errcode, channel.json_body["errcode"]) self.assertEqual(expected_error, channel.json_body["error"]) @@ -1002,7 +1008,7 @@ def _validate_token(self, link: str) -> None: path = link.replace("https://example.com", "") channel = self.make_request("GET", path, shorthand=False) - self.assertEqual(200, channel.code, channel.result) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) def _get_link_from_email(self) -> str: assert self.email_attempts, "No emails have been sent" @@ -1052,7 +1058,7 @@ def _add_email(self, request_email: str, expected_email: str) -> None: access_token=self.user_id_tok, ) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) # Get user channel = self.make_request( @@ -1061,7 +1067,7 @@ def _add_email(self, request_email: str, expected_email: str) -> None: access_token=self.user_id_tok, ) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assertEqual("email", channel.json_body["threepids"][0]["medium"]) threepids = {threepid["address"] for threepid in channel.json_body["threepids"]} @@ -1092,7 +1098,7 @@ def test_missing_mxid(self) -> None: """Tests that not 
providing any MXID raises an error.""" self._test_status( users=None, - expected_status_code=400, + expected_status_code=HTTPStatus.BAD_REQUEST, expected_errcode=Codes.MISSING_PARAM, ) @@ -1100,7 +1106,7 @@ def test_invalid_mxid(self) -> None: """Tests that providing an invalid MXID raises an error.""" self._test_status( users=["bad:test"], - expected_status_code=400, + expected_status_code=HTTPStatus.BAD_REQUEST, expected_errcode=Codes.INVALID_PARAM, ) @@ -1286,7 +1292,7 @@ async def is_expired(user_id: str) -> bool: def _test_status( self, users: Optional[List[str]], - expected_status_code: int = 200, + expected_status_code: int = HTTPStatus.OK, expected_statuses: Optional[Dict[str, Dict[str, bool]]] = None, expected_failures: Optional[List[str]] = None, expected_errcode: Optional[str] = None, diff --git a/tests/rest/client/test_directory.py b/tests/rest/client/test_directory.py index 16e7ef41bc15..7a88aa2cda7f 100644 --- a/tests/rest/client/test_directory.py +++ b/tests/rest/client/test_directory.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import json from http import HTTPStatus from twisted.test.proto_helpers import MemoryReactor @@ -97,8 +96,7 @@ def test_room_creation_too_long(self) -> None: # We use deliberately a localpart under the length threshold so # that we can make sure that the check is done on the whole alias. - data = {"room_alias_name": random_string(256 - len(self.hs.hostname))} - request_data = json.dumps(data) + request_data = {"room_alias_name": random_string(256 - len(self.hs.hostname))} channel = self.make_request( "POST", url, request_data, access_token=self.user_tok ) @@ -110,8 +108,7 @@ def test_room_creation(self) -> None: # Check with an alias of allowed length. There should already be # a test that ensures it works in test_register.py, but let's be # as cautious as possible here. - data = {"room_alias_name": random_string(5)} - request_data = json.dumps(data) + request_data = {"room_alias_name": random_string(5)} channel = self.make_request( "POST", url, request_data, access_token=self.user_tok ) @@ -144,8 +141,7 @@ def test_deleting_alias_via_directory_appservice(self) -> None: # Add an alias for the room, as the appservice alias = RoomAlias(f"asns-{random_string(5)}", self.hs.hostname).to_string() - data = {"room_id": self.room_id} - request_data = json.dumps(data) + request_data = {"room_id": self.room_id} channel = self.make_request( "PUT", @@ -193,8 +189,7 @@ def set_alias_via_state_event( self.hs.hostname, ) - data = {"aliases": [self.random_alias(alias_length)]} - request_data = json.dumps(data) + request_data = {"aliases": [self.random_alias(alias_length)]} channel = self.make_request( "PUT", url, request_data, access_token=self.user_tok @@ -206,8 +201,7 @@ def set_alias_via_directory( ) -> str: alias = self.random_alias(alias_length) url = "/_matrix/client/r0/directory/room/%s" % alias - data = {"room_id": self.room_id} - request_data = json.dumps(data) + request_data = {"room_id": self.room_id} channel = self.make_request( "PUT", url, request_data, access_token=self.user_tok diff --git a/tests/rest/client/test_identity.py b/tests/rest/client/test_identity.py index 299b9d21e24b..dc17c9d1138a 100644 --- a/tests/rest/client/test_identity.py +++ b/tests/rest/client/test_identity.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import json from http import HTTPStatus from twisted.test.proto_helpers import MemoryReactor @@ -51,12 +50,11 @@ def test_3pid_lookup_disabled(self) -> None: self.assertEqual(channel.code, HTTPStatus.OK, channel.result) room_id = channel.json_body["room_id"] - params = { + request_data = { "id_server": "testis", "medium": "email", "address": "test@example.com", } - request_data = json.dumps(params) request_url = ("/rooms/%s/invite" % (room_id)).encode("ascii") channel = self.make_request( b"POST", request_url, request_data, access_token=tok diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index f6efa5fe37f5..a2958f6959e7 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -11,9 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import json import time import urllib.parse +from http import HTTPStatus from typing import Any, Dict, List, Optional from unittest.mock import Mock from urllib.parse import urlencode @@ -261,20 +261,20 @@ def test_soft_logout(self) -> None: } channel = self.make_request(b"POST", LOGIN_URL, params) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) access_token = channel.json_body["access_token"] device_id = channel.json_body["device_id"] # we should now be able to make requests with the access token channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) # time passes self.reactor.advance(24 * 3600) # ... and we should be soft-logouted channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEqual(channel.code, 401, channel.result) + self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result) self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") self.assertEqual(channel.json_body["soft_logout"], True) @@ -288,7 +288,7 @@ def test_soft_logout(self) -> None: # more requests with the expired token should still return a soft-logout self.reactor.advance(3600) channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEqual(channel.code, 401, channel.result) + self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result) self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") self.assertEqual(channel.json_body["soft_logout"], True) @@ -296,7 +296,7 @@ def test_soft_logout(self) -> None: self._delete_device(access_token_2, "kermit", "monkey", device_id) channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEqual(channel.code, 401, channel.result) + self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result) self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") self.assertEqual(channel.json_body["soft_logout"], False) @@ -307,7 +307,7 @@ def _delete_device( channel = self.make_request( b"DELETE", "devices/" + device_id, access_token=access_token ) - self.assertEqual(channel.code, 401, channel.result) + self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result) # check it's a UI-Auth fail self.assertEqual( set(channel.json_body.keys()), @@ -330,7 +330,7 @@ def _delete_device( access_token=access_token, content={"auth": auth}, ) - self.assertEqual(channel.code, 200, channel.result) + 
self.assertEqual(channel.code, HTTPStatus.OK, channel.result) @override_config({"session_lifetime": "24h"}) def test_session_can_hard_logout_after_being_soft_logged_out(self) -> None: @@ -341,14 +341,14 @@ def test_session_can_hard_logout_after_being_soft_logged_out(self) -> None: # we should now be able to make requests with the access token channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) # time passes self.reactor.advance(24 * 3600) # ... and we should be soft-logouted channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEqual(channel.code, 401, channel.result) + self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result) self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") self.assertEqual(channel.json_body["soft_logout"], True) @@ -367,14 +367,14 @@ def test_session_can_hard_logout_all_sessions_after_being_soft_logged_out( # we should now be able to make requests with the access token channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) # time passes self.reactor.advance(24 * 3600) # ... and we should be soft-logouted channel = self.make_request(b"GET", TEST_URL, access_token=access_token) - self.assertEqual(channel.code, 401, channel.result) + self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result) self.assertEqual(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") self.assertEqual(channel.json_body["soft_logout"], True) @@ -399,7 +399,7 @@ def test_login_with_overly_long_device_id_fails(self) -> None: channel = self.make_request( "POST", "/_matrix/client/v3/login", - json.dumps(body).encode("utf8"), + body, custom_headers=None, ) @@ -466,7 +466,7 @@ def create_resource_dict(self) -> Dict[str, Resource]: def test_get_login_flows(self) -> None: """GET /login should return password and SSO flows""" channel = self.make_request("GET", "/_matrix/client/r0/login") - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) expected_flow_types = [ "m.login.cas", @@ -494,14 +494,14 @@ def test_multi_sso_redirect(self) -> None: """/login/sso/redirect should redirect to an identity picker""" # first hit the redirect url, which should redirect to our idp picker channel = self._make_sso_redirect_request(None) - self.assertEqual(channel.code, 302, channel.result) + self.assertEqual(channel.code, HTTPStatus.FOUND, channel.result) location_headers = channel.headers.getRawHeaders("Location") assert location_headers uri = location_headers[0] # hitting that picker should give us some HTML channel = self.make_request("GET", uri) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) # parse the form to check it has fields assumed elsewhere in this class html = channel.result["body"].decode("utf-8") @@ -530,7 +530,7 @@ def test_multi_sso_redirect_to_cas(self) -> None: + "&idp=cas", shorthand=False, ) - self.assertEqual(channel.code, 302, channel.result) + self.assertEqual(channel.code, HTTPStatus.FOUND, channel.result) location_headers = channel.headers.getRawHeaders("Location") assert location_headers cas_uri = location_headers[0] @@ -555,7 +555,7 @@ def test_multi_sso_redirect_to_saml(self) -> None: + 
urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL) + "&idp=saml", ) - self.assertEqual(channel.code, 302, channel.result) + self.assertEqual(channel.code, HTTPStatus.FOUND, channel.result) location_headers = channel.headers.getRawHeaders("Location") assert location_headers saml_uri = location_headers[0] @@ -579,7 +579,7 @@ def test_login_via_oidc(self) -> None: + urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL) + "&idp=oidc", ) - self.assertEqual(channel.code, 302, channel.result) + self.assertEqual(channel.code, HTTPStatus.FOUND, channel.result) location_headers = channel.headers.getRawHeaders("Location") assert location_headers oidc_uri = location_headers[0] @@ -606,7 +606,7 @@ def test_login_via_oidc(self) -> None: channel = self.helper.complete_oidc_auth(oidc_uri, cookies, {"sub": "user1"}) # that should serve a confirmation page - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) content_type_headers = channel.headers.getRawHeaders("Content-Type") assert content_type_headers self.assertTrue(content_type_headers[-1].startswith("text/html")) @@ -634,7 +634,7 @@ def test_login_via_oidc(self) -> None: "/login", content={"type": "m.login.token", "token": login_token}, ) - self.assertEqual(chan.code, 200, chan.result) + self.assertEqual(chan.code, HTTPStatus.OK, chan.result) self.assertEqual(chan.json_body["user_id"], "@user1:test") def test_multi_sso_redirect_to_unknown(self) -> None: @@ -643,18 +643,18 @@ def test_multi_sso_redirect_to_unknown(self) -> None: "GET", "/_synapse/client/pick_idp?redirectUrl=http://x&idp=xyz", ) - self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) def test_client_idp_redirect_to_unknown(self) -> None: """If the client tries to pick an unknown IdP, return a 404""" channel = self._make_sso_redirect_request("xxx") - self.assertEqual(channel.code, 404, channel.result) + self.assertEqual(channel.code, HTTPStatus.NOT_FOUND, channel.result) self.assertEqual(channel.json_body["errcode"], "M_NOT_FOUND") def test_client_idp_redirect_to_oidc(self) -> None: """If the client pick a known IdP, redirect to it""" channel = self._make_sso_redirect_request("oidc") - self.assertEqual(channel.code, 302, channel.result) + self.assertEqual(channel.code, HTTPStatus.FOUND, channel.result) location_headers = channel.headers.getRawHeaders("Location") assert location_headers oidc_uri = location_headers[0] @@ -765,7 +765,7 @@ def test_cas_redirect_confirm(self) -> None: channel = self.make_request("GET", cas_ticket_url) # Test that the response is HTML. 
- self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) content_type_header_value = "" for header in channel.result.get("headers", []): if header[0] == b"Content-Type": @@ -1246,7 +1246,7 @@ def test_username_picker(self) -> None: ) # that should redirect to the username picker - self.assertEqual(channel.code, 302, channel.result) + self.assertEqual(channel.code, HTTPStatus.FOUND, channel.result) location_headers = channel.headers.getRawHeaders("Location") assert location_headers picker_url = location_headers[0] @@ -1290,7 +1290,7 @@ def test_username_picker(self) -> None: ("Content-Length", str(len(content))), ], ) - self.assertEqual(chan.code, 302, chan.result) + self.assertEqual(chan.code, HTTPStatus.FOUND, chan.result) location_headers = chan.headers.getRawHeaders("Location") assert location_headers @@ -1300,7 +1300,7 @@ def test_username_picker(self) -> None: path=location_headers[0], custom_headers=[("Cookie", "username_mapping_session=" + session_id)], ) - self.assertEqual(chan.code, 302, chan.result) + self.assertEqual(chan.code, HTTPStatus.FOUND, chan.result) location_headers = chan.headers.getRawHeaders("Location") assert location_headers @@ -1325,5 +1325,5 @@ def test_username_picker(self) -> None: "/login", content={"type": "m.login.token", "token": login_token}, ) - self.assertEqual(chan.code, 200, chan.result) + self.assertEqual(chan.code, HTTPStatus.OK, chan.result) self.assertEqual(chan.json_body["user_id"], "@bobby:test") diff --git a/tests/rest/client/test_password_policy.py b/tests/rest/client/test_password_policy.py index 3a74d2e96c49..e19d21d6ee53 100644 --- a/tests/rest/client/test_password_policy.py +++ b/tests/rest/client/test_password_policy.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import json from http import HTTPStatus from twisted.test.proto_helpers import MemoryReactor @@ -89,7 +88,7 @@ def test_get_policy(self) -> None: ) def test_password_too_short(self) -> None: - request_data = json.dumps({"username": "kermit", "password": "shorty"}) + request_data = {"username": "kermit", "password": "shorty"} channel = self.make_request("POST", self.register_url, request_data) self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) @@ -100,7 +99,7 @@ def test_password_too_short(self) -> None: ) def test_password_no_digit(self) -> None: - request_data = json.dumps({"username": "kermit", "password": "longerpassword"}) + request_data = {"username": "kermit", "password": "longerpassword"} channel = self.make_request("POST", self.register_url, request_data) self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) @@ -111,7 +110,7 @@ def test_password_no_digit(self) -> None: ) def test_password_no_symbol(self) -> None: - request_data = json.dumps({"username": "kermit", "password": "l0ngerpassword"}) + request_data = {"username": "kermit", "password": "l0ngerpassword"} channel = self.make_request("POST", self.register_url, request_data) self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) @@ -122,7 +121,7 @@ def test_password_no_symbol(self) -> None: ) def test_password_no_uppercase(self) -> None: - request_data = json.dumps({"username": "kermit", "password": "l0ngerpassword!"}) + request_data = {"username": "kermit", "password": "l0ngerpassword!"} channel = self.make_request("POST", self.register_url, request_data) self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) @@ -133,7 +132,7 @@ def test_password_no_uppercase(self) -> None: ) def test_password_no_lowercase(self) -> None: - request_data = json.dumps({"username": "kermit", "password": "L0NGERPASSWORD!"}) + request_data = {"username": "kermit", "password": "L0NGERPASSWORD!"} channel = self.make_request("POST", self.register_url, request_data) self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) @@ -144,7 +143,7 @@ def test_password_no_lowercase(self) -> None: ) def test_password_compliant(self) -> None: - request_data = json.dumps({"username": "kermit", "password": "L0ngerpassword!"}) + request_data = {"username": "kermit", "password": "L0ngerpassword!"} channel = self.make_request("POST", self.register_url, request_data) # Getting a 401 here means the password has passed validation and the server has @@ -161,16 +160,14 @@ def test_password_change(self) -> None: user_id = self.register_user("kermit", compliant_password) tok = self.login("kermit", compliant_password) - request_data = json.dumps( - { - "new_password": not_compliant_password, - "auth": { - "password": compliant_password, - "type": LoginType.PASSWORD, - "user": user_id, - }, - } - ) + request_data = { + "new_password": not_compliant_password, + "auth": { + "password": compliant_password, + "type": LoginType.PASSWORD, + "user": user_id, + }, + } channel = self.make_request( "POST", "/_matrix/client/r0/account/password", diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py index afb08b2736de..f8e64ce6ac9c 100644 --- a/tests/rest/client/test_register.py +++ b/tests/rest/client/test_register.py @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import datetime -import json import os from typing import Any, Dict, List, Tuple @@ -62,9 +61,10 @@ def test_POST_appservice_registration_valid(self) -> None: ) self.hs.get_datastores().main.services_cache.append(appservice) - request_data = json.dumps( - {"username": "as_user_kermit", "type": APP_SERVICE_REGISTRATION_TYPE} - ) + request_data = { + "username": "as_user_kermit", + "type": APP_SERVICE_REGISTRATION_TYPE, + } channel = self.make_request( b"POST", self.url + b"?access_token=i_am_an_app_service", request_data @@ -85,7 +85,7 @@ def test_POST_appservice_registration_no_type(self) -> None: ) self.hs.get_datastores().main.services_cache.append(appservice) - request_data = json.dumps({"username": "as_user_kermit"}) + request_data = {"username": "as_user_kermit"} channel = self.make_request( b"POST", self.url + b"?access_token=i_am_an_app_service", request_data @@ -95,9 +95,7 @@ def test_POST_appservice_registration_no_type(self) -> None: def test_POST_appservice_registration_invalid(self) -> None: self.appservice = None # no application service exists - request_data = json.dumps( - {"username": "kermit", "type": APP_SERVICE_REGISTRATION_TYPE} - ) + request_data = {"username": "kermit", "type": APP_SERVICE_REGISTRATION_TYPE} channel = self.make_request( b"POST", self.url + b"?access_token=i_am_an_app_service", request_data ) @@ -105,14 +103,14 @@ def test_POST_appservice_registration_invalid(self) -> None: self.assertEqual(channel.result["code"], b"401", channel.result) def test_POST_bad_password(self) -> None: - request_data = json.dumps({"username": "kermit", "password": 666}) + request_data = {"username": "kermit", "password": 666} channel = self.make_request(b"POST", self.url, request_data) self.assertEqual(channel.result["code"], b"400", channel.result) self.assertEqual(channel.json_body["error"], "Invalid password") def test_POST_bad_username(self) -> None: - request_data = json.dumps({"username": 777, "password": "monkey"}) + request_data = {"username": 777, "password": "monkey"} channel = self.make_request(b"POST", self.url, request_data) self.assertEqual(channel.result["code"], b"400", channel.result) @@ -121,13 +119,12 @@ def test_POST_bad_username(self) -> None: def test_POST_user_valid(self) -> None: user_id = "@kermit:test" device_id = "frogfone" - params = { + request_data = { "username": "kermit", "password": "monkey", "device_id": device_id, "auth": {"type": LoginType.DUMMY}, } - request_data = json.dumps(params) channel = self.make_request(b"POST", self.url, request_data) det_data = { @@ -140,7 +137,7 @@ def test_POST_user_valid(self) -> None: @override_config({"enable_registration": False}) def test_POST_disabled_registration(self) -> None: - request_data = json.dumps({"username": "kermit", "password": "monkey"}) + request_data = {"username": "kermit", "password": "monkey"} self.auth_result = (None, {"username": "kermit", "password": "monkey"}, None) channel = self.make_request(b"POST", self.url, request_data) @@ -188,13 +185,12 @@ def test_POST_ratelimiting_guest(self) -> None: @override_config({"rc_registration": {"per_second": 0.17, "burst_count": 5}}) def test_POST_ratelimiting(self) -> None: for i in range(0, 6): - params = { + request_data = { "username": "kermit" + str(i), "password": "monkey", "device_id": "frogfone", "auth": {"type": LoginType.DUMMY}, } - request_data = json.dumps(params) channel = self.make_request(b"POST", self.url, request_data) if i == 5: @@ -234,7 +230,7 @@ def test_POST_registration_requires_token(self) -> None: } # Request without 
auth to get flows and session - channel = self.make_request(b"POST", self.url, json.dumps(params)) + channel = self.make_request(b"POST", self.url, params) self.assertEqual(channel.result["code"], b"401", channel.result) flows = channel.json_body["flows"] # Synapse adds a dummy stage to differentiate flows where otherwise one @@ -251,8 +247,7 @@ def test_POST_registration_requires_token(self) -> None: "token": token, "session": session, } - request_data = json.dumps(params) - channel = self.make_request(b"POST", self.url, request_data) + channel = self.make_request(b"POST", self.url, params) self.assertEqual(channel.result["code"], b"401", channel.result) completed = channel.json_body["completed"] self.assertCountEqual([LoginType.REGISTRATION_TOKEN], completed) @@ -262,8 +257,7 @@ def test_POST_registration_requires_token(self) -> None: "type": LoginType.DUMMY, "session": session, } - request_data = json.dumps(params) - channel = self.make_request(b"POST", self.url, request_data) + channel = self.make_request(b"POST", self.url, params) det_data = { "user_id": f"@{username}:{self.hs.hostname}", "home_server": self.hs.hostname, @@ -290,7 +284,7 @@ def test_POST_registration_token_invalid(self) -> None: "password": "monkey", } # Request without auth to get session - channel = self.make_request(b"POST", self.url, json.dumps(params)) + channel = self.make_request(b"POST", self.url, params) session = channel.json_body["session"] # Test with token param missing (invalid) @@ -298,21 +292,21 @@ def test_POST_registration_token_invalid(self) -> None: "type": LoginType.REGISTRATION_TOKEN, "session": session, } - channel = self.make_request(b"POST", self.url, json.dumps(params)) + channel = self.make_request(b"POST", self.url, params) self.assertEqual(channel.result["code"], b"401", channel.result) self.assertEqual(channel.json_body["errcode"], Codes.MISSING_PARAM) self.assertEqual(channel.json_body["completed"], []) # Test with non-string (invalid) params["auth"]["token"] = 1234 - channel = self.make_request(b"POST", self.url, json.dumps(params)) + channel = self.make_request(b"POST", self.url, params) self.assertEqual(channel.result["code"], b"401", channel.result) self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM) self.assertEqual(channel.json_body["completed"], []) # Test with unknown token (invalid) params["auth"]["token"] = "1234" - channel = self.make_request(b"POST", self.url, json.dumps(params)) + channel = self.make_request(b"POST", self.url, params) self.assertEqual(channel.result["code"], b"401", channel.result) self.assertEqual(channel.json_body["errcode"], Codes.UNAUTHORIZED) self.assertEqual(channel.json_body["completed"], []) @@ -337,9 +331,9 @@ def test_POST_registration_token_limit_uses(self) -> None: params1: JsonDict = {"username": "bert", "password": "monkey"} params2: JsonDict = {"username": "ernie", "password": "monkey"} # Do 2 requests without auth to get two session IDs - channel1 = self.make_request(b"POST", self.url, json.dumps(params1)) + channel1 = self.make_request(b"POST", self.url, params1) session1 = channel1.json_body["session"] - channel2 = self.make_request(b"POST", self.url, json.dumps(params2)) + channel2 = self.make_request(b"POST", self.url, params2) session2 = channel2.json_body["session"] # Use token with session1 and check `pending` is 1 @@ -348,9 +342,9 @@ def test_POST_registration_token_limit_uses(self) -> None: "token": token, "session": session1, } - self.make_request(b"POST", self.url, json.dumps(params1)) + self.make_request(b"POST", 
self.url, params1) # Repeat request to make sure pending isn't increased again - self.make_request(b"POST", self.url, json.dumps(params1)) + self.make_request(b"POST", self.url, params1) pending = self.get_success( store.db_pool.simple_select_one_onecol( "registration_tokens", @@ -366,14 +360,14 @@ def test_POST_registration_token_limit_uses(self) -> None: "token": token, "session": session2, } - channel = self.make_request(b"POST", self.url, json.dumps(params2)) + channel = self.make_request(b"POST", self.url, params2) self.assertEqual(channel.result["code"], b"401", channel.result) self.assertEqual(channel.json_body["errcode"], Codes.UNAUTHORIZED) self.assertEqual(channel.json_body["completed"], []) # Complete registration with session1 params1["auth"]["type"] = LoginType.DUMMY - self.make_request(b"POST", self.url, json.dumps(params1)) + self.make_request(b"POST", self.url, params1) # Check pending=0 and completed=1 res = self.get_success( store.db_pool.simple_select_one( @@ -386,7 +380,7 @@ def test_POST_registration_token_limit_uses(self) -> None: self.assertEqual(res["completed"], 1) # Check auth still fails when using token with session2 - channel = self.make_request(b"POST", self.url, json.dumps(params2)) + channel = self.make_request(b"POST", self.url, params2) self.assertEqual(channel.result["code"], b"401", channel.result) self.assertEqual(channel.json_body["errcode"], Codes.UNAUTHORIZED) self.assertEqual(channel.json_body["completed"], []) @@ -411,7 +405,7 @@ def test_POST_registration_token_expiry(self) -> None: ) params: JsonDict = {"username": "kermit", "password": "monkey"} # Request without auth to get session - channel = self.make_request(b"POST", self.url, json.dumps(params)) + channel = self.make_request(b"POST", self.url, params) session = channel.json_body["session"] # Check authentication fails with expired token @@ -420,7 +414,7 @@ def test_POST_registration_token_expiry(self) -> None: "token": token, "session": session, } - channel = self.make_request(b"POST", self.url, json.dumps(params)) + channel = self.make_request(b"POST", self.url, params) self.assertEqual(channel.result["code"], b"401", channel.result) self.assertEqual(channel.json_body["errcode"], Codes.UNAUTHORIZED) self.assertEqual(channel.json_body["completed"], []) @@ -435,7 +429,7 @@ def test_POST_registration_token_expiry(self) -> None: ) # Check authentication succeeds - channel = self.make_request(b"POST", self.url, json.dumps(params)) + channel = self.make_request(b"POST", self.url, params) completed = channel.json_body["completed"] self.assertCountEqual([LoginType.REGISTRATION_TOKEN], completed) @@ -460,9 +454,9 @@ def test_POST_registration_token_session_expiry(self) -> None: # Do 2 requests without auth to get two session IDs params1: JsonDict = {"username": "bert", "password": "monkey"} params2: JsonDict = {"username": "ernie", "password": "monkey"} - channel1 = self.make_request(b"POST", self.url, json.dumps(params1)) + channel1 = self.make_request(b"POST", self.url, params1) session1 = channel1.json_body["session"] - channel2 = self.make_request(b"POST", self.url, json.dumps(params2)) + channel2 = self.make_request(b"POST", self.url, params2) session2 = channel2.json_body["session"] # Use token with both sessions @@ -471,18 +465,18 @@ def test_POST_registration_token_session_expiry(self) -> None: "token": token, "session": session1, } - self.make_request(b"POST", self.url, json.dumps(params1)) + self.make_request(b"POST", self.url, params1) params2["auth"] = { "type": 
LoginType.REGISTRATION_TOKEN, "token": token, "session": session2, } - self.make_request(b"POST", self.url, json.dumps(params2)) + self.make_request(b"POST", self.url, params2) # Complete registration with session1 params1["auth"]["type"] = LoginType.DUMMY - self.make_request(b"POST", self.url, json.dumps(params1)) + self.make_request(b"POST", self.url, params1) # Check `result` of registration token stage for session1 is `True` result1 = self.get_success( @@ -550,7 +544,7 @@ def test_POST_registration_token_session_expiry_deleted_token(self) -> None: # Do request without auth to get a session ID params: JsonDict = {"username": "kermit", "password": "monkey"} - channel = self.make_request(b"POST", self.url, json.dumps(params)) + channel = self.make_request(b"POST", self.url, params) session = channel.json_body["session"] # Use token @@ -559,7 +553,7 @@ def test_POST_registration_token_session_expiry_deleted_token(self) -> None: "token": token, "session": session, } - self.make_request(b"POST", self.url, json.dumps(params)) + self.make_request(b"POST", self.url, params) # Delete token self.get_success( @@ -827,8 +821,7 @@ def test_manual_renewal(self) -> None: admin_tok = self.login("admin", "adminpassword") url = "/_synapse/admin/v1/account_validity/validity" - params = {"user_id": user_id} - request_data = json.dumps(params) + request_data = {"user_id": user_id} channel = self.make_request(b"POST", url, request_data, access_token=admin_tok) self.assertEqual(channel.result["code"], b"200", channel.result) @@ -845,12 +838,11 @@ def test_manual_expire(self) -> None: admin_tok = self.login("admin", "adminpassword") url = "/_synapse/admin/v1/account_validity/validity" - params = { + request_data = { "user_id": user_id, "expiration_ts": 0, "enable_renewal_emails": False, } - request_data = json.dumps(params) channel = self.make_request(b"POST", url, request_data, access_token=admin_tok) self.assertEqual(channel.result["code"], b"200", channel.result) @@ -870,12 +862,11 @@ def test_logging_out_expired_user(self) -> None: admin_tok = self.login("admin", "adminpassword") url = "/_synapse/admin/v1/account_validity/validity" - params = { + request_data = { "user_id": user_id, "expiration_ts": 0, "enable_renewal_emails": False, } - request_data = json.dumps(params) channel = self.make_request(b"POST", url, request_data, access_token=admin_tok) self.assertEqual(channel.result["code"], b"200", channel.result) @@ -1041,16 +1032,14 @@ def test_deactivated_user(self) -> None: (user_id, tok) = self.create_user() - request_data = json.dumps( - { - "auth": { - "type": "m.login.password", - "user": user_id, - "password": "monkey", - }, - "erase": False, - } - ) + request_data = { + "auth": { + "type": "m.login.password", + "user": user_id, + "password": "monkey", + }, + "erase": False, + } channel = self.make_request( "POST", "account/deactivate", request_data, access_token=tok ) diff --git a/tests/rest/client/test_report_event.py b/tests/rest/client/test_report_event.py index 20a259fc4388..ad0d0209f7ea 100644 --- a/tests/rest/client/test_report_event.py +++ b/tests/rest/client/test_report_event.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import json - from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin @@ -77,10 +75,7 @@ def test_reason_and_score_null(self) -> None: def _assert_status(self, response_status: int, data: JsonDict) -> None: channel = self.make_request( - "POST", - self.report_path, - json.dumps(data), - access_token=self.other_user_tok, + "POST", self.report_path, data, access_token=self.other_user_tok ) self.assertEqual( response_status, int(channel.result["code"]), msg=channel.result["body"] diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index df7ffbe54538..c45cb3209025 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -18,6 +18,7 @@ """Tests REST events for /rooms paths.""" import json +from http import HTTPStatus from typing import Any, Dict, Iterable, List, Optional, Tuple, Union from unittest.mock import Mock, call from urllib import parse as urlparse @@ -104,7 +105,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: channel = self.make_request( "PUT", self.created_rmid_msg_path, b'{"msgtype":"m.text","body":"test msg"}' ) - self.assertEqual(200, channel.code, channel.result) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) # set topic for public room channel = self.make_request( @@ -112,7 +113,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: ("rooms/%s/state/m.room.topic" % self.created_public_rmid).encode("ascii"), b'{"topic":"Public Room Topic"}', ) - self.assertEqual(200, channel.code, channel.result) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) # auth as user_id now self.helper.auth_user_id = self.user_id @@ -134,28 +135,28 @@ def send_msg_path() -> str: "/rooms/%s/send/m.room.message/mid2" % (self.uncreated_rmid,), msg_content, ) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) # send message in created room not joined (no state), expect 403 channel = self.make_request("PUT", send_msg_path(), msg_content) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) # send message in created room and invited, expect 403 self.helper.invite( room=self.created_rmid, src=self.rmcreator_id, targ=self.user_id ) channel = self.make_request("PUT", send_msg_path(), msg_content) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) # send message in created room and joined, expect 200 self.helper.join(room=self.created_rmid, user=self.user_id) channel = self.make_request("PUT", send_msg_path(), msg_content) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) # send message in created room and left, expect 403 self.helper.leave(room=self.created_rmid, user=self.user_id) channel = self.make_request("PUT", send_msg_path(), msg_content) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) def test_topic_perms(self) -> None: topic_content = b'{"topic":"My Topic Name"}' @@ -165,28 +166,28 @@ def test_topic_perms(self) -> None: channel = self.make_request( "PUT", "/rooms/%s/state/m.room.topic" % self.uncreated_rmid, topic_content ) - self.assertEqual(403, 
channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) channel = self.make_request( "GET", "/rooms/%s/state/m.room.topic" % self.uncreated_rmid ) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) # set/get topic in created PRIVATE room not joined, expect 403 channel = self.make_request("PUT", topic_path, topic_content) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) channel = self.make_request("GET", topic_path) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) # set topic in created PRIVATE room and invited, expect 403 self.helper.invite( room=self.created_rmid, src=self.rmcreator_id, targ=self.user_id ) channel = self.make_request("PUT", topic_path, topic_content) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) # get topic in created PRIVATE room and invited, expect 403 channel = self.make_request("GET", topic_path) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) # set/get topic in created PRIVATE room and joined, expect 200 self.helper.join(room=self.created_rmid, user=self.user_id) @@ -194,25 +195,25 @@ def test_topic_perms(self) -> None: # Only room ops can set topic by default self.helper.auth_user_id = self.rmcreator_id channel = self.make_request("PUT", topic_path, topic_content) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.helper.auth_user_id = self.user_id channel = self.make_request("GET", topic_path) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assert_dict(json.loads(topic_content.decode("utf8")), channel.json_body) # set/get topic in created PRIVATE room and left, expect 403 self.helper.leave(room=self.created_rmid, user=self.user_id) channel = self.make_request("PUT", topic_path, topic_content) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) channel = self.make_request("GET", topic_path) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) # get topic in PUBLIC room, not joined, expect 403 channel = self.make_request( "GET", "/rooms/%s/state/m.room.topic" % self.created_public_rmid ) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) # set topic in PUBLIC room, not joined, expect 403 channel = self.make_request( @@ -220,7 +221,7 @@ def test_topic_perms(self) -> None: "/rooms/%s/state/m.room.topic" % self.created_public_rmid, topic_content, ) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) def _test_get_membership( self, room: str, members: Iterable = frozenset(), expect_code: int = 200 @@ -309,14 +310,14 @@ def 
test_invited_permissions(self) -> None: src=self.user_id, targ=self.rmcreator_id, membership=Membership.JOIN, - expect_code=403, + expect_code=HTTPStatus.FORBIDDEN, ) self.helper.change_membership( room=room, src=self.user_id, targ=self.rmcreator_id, membership=Membership.LEAVE, - expect_code=403, + expect_code=HTTPStatus.FORBIDDEN, ) def test_joined_permissions(self) -> None: @@ -342,7 +343,7 @@ def test_joined_permissions(self) -> None: src=self.user_id, targ=other, membership=Membership.JOIN, - expect_code=403, + expect_code=HTTPStatus.FORBIDDEN, ) # set left of other, expect 403 @@ -351,7 +352,7 @@ def test_joined_permissions(self) -> None: src=self.user_id, targ=other, membership=Membership.LEAVE, - expect_code=403, + expect_code=HTTPStatus.FORBIDDEN, ) # set left of self, expect 200 @@ -371,7 +372,7 @@ def test_leave_permissions(self) -> None: src=self.user_id, targ=usr, membership=Membership.INVITE, - expect_code=403, + expect_code=HTTPStatus.FORBIDDEN, ) self.helper.change_membership( @@ -379,7 +380,7 @@ def test_leave_permissions(self) -> None: src=self.user_id, targ=usr, membership=Membership.JOIN, - expect_code=403, + expect_code=HTTPStatus.FORBIDDEN, ) # It is always valid to LEAVE if you've already left (currently.) @@ -388,7 +389,7 @@ def test_leave_permissions(self) -> None: src=self.user_id, targ=self.rmcreator_id, membership=Membership.LEAVE, - expect_code=403, + expect_code=HTTPStatus.FORBIDDEN, ) # tests the "from banned" line from the table in https://spec.matrix.org/unstable/client-server-api/#mroommember @@ -405,7 +406,7 @@ def test_member_event_from_ban(self) -> None: src=self.user_id, targ=other, membership=Membership.BAN, - expect_code=403, # expect failure + expect_code=HTTPStatus.FORBIDDEN, # expect failure expect_errcode=Codes.FORBIDDEN, ) @@ -415,7 +416,7 @@ def test_member_event_from_ban(self) -> None: src=self.rmcreator_id, targ=other, membership=Membership.BAN, - expect_code=200, + expect_code=HTTPStatus.OK, ) # from ban to invite: Must never happen. @@ -424,7 +425,7 @@ def test_member_event_from_ban(self) -> None: src=self.rmcreator_id, targ=other, membership=Membership.INVITE, - expect_code=403, # expect failure + expect_code=HTTPStatus.FORBIDDEN, # expect failure expect_errcode=Codes.BAD_STATE, ) @@ -434,7 +435,7 @@ def test_member_event_from_ban(self) -> None: src=other, targ=other, membership=Membership.JOIN, - expect_code=403, # expect failure + expect_code=HTTPStatus.FORBIDDEN, # expect failure expect_errcode=Codes.BAD_STATE, ) @@ -444,7 +445,7 @@ def test_member_event_from_ban(self) -> None: src=self.rmcreator_id, targ=other, membership=Membership.BAN, - expect_code=200, + expect_code=HTTPStatus.OK, ) # from ban to knock: Must never happen. 
@@ -453,7 +454,7 @@ def test_member_event_from_ban(self) -> None: src=self.rmcreator_id, targ=other, membership=Membership.KNOCK, - expect_code=403, # expect failure + expect_code=HTTPStatus.FORBIDDEN, # expect failure expect_errcode=Codes.BAD_STATE, ) @@ -463,7 +464,7 @@ def test_member_event_from_ban(self) -> None: src=self.user_id, targ=other, membership=Membership.LEAVE, - expect_code=403, # expect failure + expect_code=HTTPStatus.FORBIDDEN, # expect failure expect_errcode=Codes.FORBIDDEN, ) @@ -473,7 +474,7 @@ def test_member_event_from_ban(self) -> None: src=self.rmcreator_id, targ=other, membership=Membership.LEAVE, - expect_code=200, + expect_code=HTTPStatus.OK, ) @@ -493,7 +494,7 @@ def test_get_state_cancellation(self) -> None: "/rooms/%s/state" % room_id, ) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assertCountEqual( [state_event["type"] for state_event in channel.json_body], { @@ -516,7 +517,7 @@ def test_get_state_event_cancellation(self) -> None: "/rooms/%s/state/m.room.member/%s" % (room_id, self.user_id), ) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assertEqual(channel.json_body, {"membership": "join"}) @@ -530,16 +531,16 @@ class RoomsMemberListTestCase(RoomBase): def test_get_member_list(self) -> None: room_id = self.helper.create_room_as(self.user_id) channel = self.make_request("GET", "/rooms/%s/members" % room_id) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) def test_get_member_list_no_room(self) -> None: channel = self.make_request("GET", "/rooms/roomdoesnotexist/members") - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) def test_get_member_list_no_permission(self) -> None: room_id = self.helper.create_room_as("@some_other_guy:red") channel = self.make_request("GET", "/rooms/%s/members" % room_id) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) def test_get_member_list_no_permission_with_at_token(self) -> None: """ @@ -550,7 +551,7 @@ def test_get_member_list_no_permission_with_at_token(self) -> None: # first sync to get an at token channel = self.make_request("GET", "/sync") - self.assertEqual(200, channel.code) + self.assertEqual(HTTPStatus.OK, channel.code) sync_token = channel.json_body["next_batch"] # check that permission is denied for @sid1:red to get the @@ -559,7 +560,7 @@ def test_get_member_list_no_permission_with_at_token(self) -> None: "GET", f"/rooms/{room_id}/members?at={sync_token}", ) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) def test_get_member_list_no_permission_former_member(self) -> None: """ @@ -572,14 +573,14 @@ def test_get_member_list_no_permission_former_member(self) -> None: # check that the user can see the member list to start with channel = self.make_request("GET", "/rooms/%s/members" % room_id) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) # ban the user self.helper.change_membership(room_id, "@alice:red", self.user_id, "ban") # check the 
user can no longer see the member list channel = self.make_request("GET", "/rooms/%s/members" % room_id) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) def test_get_member_list_no_permission_former_member_with_at_token(self) -> None: """ @@ -593,14 +594,14 @@ def test_get_member_list_no_permission_former_member_with_at_token(self) -> None # sync to get an at token channel = self.make_request("GET", "/sync") - self.assertEqual(200, channel.code) + self.assertEqual(HTTPStatus.OK, channel.code) sync_token = channel.json_body["next_batch"] # check that the user can see the member list to start with channel = self.make_request( "GET", "/rooms/%s/members?at=%s" % (room_id, sync_token) ) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) # ban the user (Note: the user is actually allowed to see this event and # state so that they know they're banned!) @@ -612,14 +613,14 @@ def test_get_member_list_no_permission_former_member_with_at_token(self) -> None # now, with the original user, sync again to get a new at token channel = self.make_request("GET", "/sync") - self.assertEqual(200, channel.code) + self.assertEqual(HTTPStatus.OK, channel.code) sync_token = channel.json_body["next_batch"] # check the user can no longer see the updated member list channel = self.make_request( "GET", "/rooms/%s/members?at=%s" % (room_id, sync_token) ) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) def test_get_member_list_mixed_memberships(self) -> None: room_creator = "@some_other_guy:red" @@ -628,17 +629,17 @@ def test_get_member_list_mixed_memberships(self) -> None: self.helper.invite(room=room_id, src=room_creator, targ=self.user_id) # can't see list if you're just invited. 
channel = self.make_request("GET", room_path) - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) self.helper.join(room=room_id, user=self.user_id) # can see list now joined channel = self.make_request("GET", room_path) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.helper.leave(room=room_id, user=self.user_id) # can see old list once left channel = self.make_request("GET", room_path) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) def test_get_member_list_cancellation(self) -> None: """Test cancellation of a `/rooms/$room_id/members` request.""" @@ -651,7 +652,7 @@ def test_get_member_list_cancellation(self) -> None: "/rooms/%s/members" % room_id, ) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assertEqual(len(channel.json_body["chunk"]), 1) self.assertLessEqual( { @@ -671,7 +672,7 @@ def test_get_member_list_with_at_token_cancellation(self) -> None: # first sync to get an at token channel = self.make_request("GET", "/sync") - self.assertEqual(200, channel.code) + self.assertEqual(HTTPStatus.OK, channel.code) sync_token = channel.json_body["next_batch"] channel = make_request_with_cancellation_test( @@ -682,7 +683,7 @@ def test_get_member_list_with_at_token_cancellation(self) -> None: "/rooms/%s/members?at=%s" % (room_id, sync_token), ) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assertEqual(len(channel.json_body["chunk"]), 1) self.assertLessEqual( { @@ -706,10 +707,10 @@ def test_post_room_no_keys(self) -> None: # POST with no config keys, expect new room id channel = self.make_request("POST", "/createRoom", "{}") - self.assertEqual(200, channel.code, channel.result) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) assert channel.resource_usage is not None - self.assertEqual(37, channel.resource_usage.db_txn_count) + self.assertEqual(44, channel.resource_usage.db_txn_count) def test_post_room_initial_state(self) -> None: # POST with initial_state config key, expect new room id @@ -719,21 +720,21 @@ def test_post_room_initial_state(self) -> None: b'{"initial_state":[{"type": "m.bridge", "content": {}}]}', ) - self.assertEqual(200, channel.code, channel.result) + self.assertEqual(HTTPStatus.OK, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) assert channel.resource_usage is not None - self.assertEqual(41, channel.resource_usage.db_txn_count) + self.assertEqual(50, channel.resource_usage.db_txn_count) def test_post_room_visibility_key(self) -> None: # POST with visibility config key, expect new room id channel = self.make_request("POST", "/createRoom", b'{"visibility":"private"}') - self.assertEqual(200, channel.code) + self.assertEqual(HTTPStatus.OK, channel.code) self.assertTrue("room_id" in channel.json_body) def test_post_room_custom_key(self) -> None: # POST with custom config keys, expect new room id channel = self.make_request("POST", "/createRoom", b'{"custom":"stuff"}') - self.assertEqual(200, channel.code) + self.assertEqual(HTTPStatus.OK, channel.code) self.assertTrue("room_id" in channel.json_body) def 
test_post_room_known_and_unknown_keys(self) -> None: @@ -741,16 +742,16 @@ def test_post_room_known_and_unknown_keys(self) -> None: channel = self.make_request( "POST", "/createRoom", b'{"visibility":"private","custom":"things"}' ) - self.assertEqual(200, channel.code) + self.assertEqual(HTTPStatus.OK, channel.code) self.assertTrue("room_id" in channel.json_body) def test_post_room_invalid_content(self) -> None: # POST with invalid content / paths, expect 400 channel = self.make_request("POST", "/createRoom", b'{"visibili') - self.assertEqual(400, channel.code) + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code) channel = self.make_request("POST", "/createRoom", b'["hello"]') - self.assertEqual(400, channel.code) + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code) def test_post_room_invitees_invalid_mxid(self) -> None: # POST with invalid invitee, see https://github.com/matrix-org/synapse/issues/4088 @@ -758,7 +759,7 @@ def test_post_room_invitees_invalid_mxid(self) -> None: channel = self.make_request( "POST", "/createRoom", b'{"invite":["@alice:example.com "]}' ) - self.assertEqual(400, channel.code) + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code) @unittest.override_config({"rc_invites": {"per_room": {"burst_count": 3}}}) def test_post_room_invitees_ratelimit(self) -> None: @@ -769,20 +770,18 @@ def test_post_room_invitees_ratelimit(self) -> None: # Build the request's content. We use local MXIDs because invites over federation # are more difficult to mock. - content = json.dumps( - { - "invite": [ - "@alice1:red", - "@alice2:red", - "@alice3:red", - "@alice4:red", - ] - } - ).encode("utf8") + content = { + "invite": [ + "@alice1:red", + "@alice2:red", + "@alice3:red", + "@alice4:red", + ] + } # Test that the invites are correctly ratelimited. channel = self.make_request("POST", "/createRoom", content) - self.assertEqual(400, channel.code) + self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code) self.assertEqual( "Cannot invite so many users at once", channel.json_body["error"], @@ -795,7 +794,7 @@ def test_post_room_invitees_ratelimit(self) -> None: # Test that the invites aren't ratelimited anymore. 
channel = self.make_request("POST", "/createRoom", content) - self.assertEqual(200, channel.code) + self.assertEqual(HTTPStatus.OK, channel.code) def test_spam_checker_may_join_room_deprecated(self) -> None: """Tests that the user_may_join_room spam checker callback is correctly bypassed @@ -819,7 +818,7 @@ async def user_may_join_room( "/createRoom", {}, ) - self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) self.assertEqual(join_mock.call_count, 0) @@ -845,7 +844,7 @@ async def user_may_join_room_codes( "/createRoom", {}, ) - self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) self.assertEqual(join_mock.call_count, 0) @@ -865,7 +864,7 @@ async def user_may_join_room_tuple( "/createRoom", {}, ) - self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) self.assertEqual(join_mock.call_count, 0) @@ -882,54 +881,68 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: def test_invalid_puts(self) -> None: # missing keys or invalid json channel = self.make_request("PUT", self.path, "{}") - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", self.path, '{"_name":"bo"}') - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", self.path, '{"nao') - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request( "PUT", self.path, '[{"_name":"bo"},{"_name":"jill"}]' ) - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", self.path, "text only") - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", self.path, "") - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) # valid key, wrong type content = '{"topic":["Topic name"]}' channel = self.make_request("PUT", self.path, content) - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) def test_rooms_topic(self) -> None: # nothing should be there channel = self.make_request("GET", self.path) - self.assertEqual(404, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.result["body"]) # valid put content = '{"topic":"Topic name"}' channel = self.make_request("PUT", self.path, content) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) # valid get channel = self.make_request("GET", self.path) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assert_dict(json.loads(content), channel.json_body) def 
test_rooms_topic_with_extra_keys(self) -> None: # valid put with extra keys content = '{"topic":"Seasons","subtopic":"Summer"}' channel = self.make_request("PUT", self.path, content) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) # valid get channel = self.make_request("GET", self.path) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assert_dict(json.loads(content), channel.json_body) @@ -945,22 +958,34 @@ def test_invalid_puts(self) -> None: path = "/rooms/%s/state/m.room.member/%s" % (self.room_id, self.user_id) # missing keys or invalid json channel = self.make_request("PUT", path, "{}") - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", path, '{"_name":"bo"}') - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", path, '{"nao') - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", path, b'[{"_name":"bo"},{"_name":"jill"}]') - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", path, "text only") - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", path, "") - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) # valid keys, wrong types content = '{"membership":["%s","%s","%s"]}' % ( @@ -969,7 +994,9 @@ def test_invalid_puts(self) -> None: Membership.LEAVE, ) channel = self.make_request("PUT", path, content.encode("ascii")) - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) def test_rooms_members_self(self) -> None: path = "/rooms/%s/state/m.room.member/%s" % ( @@ -980,10 +1007,10 @@ def test_rooms_members_self(self) -> None: # valid join message (NOOP since we made the room) content = '{"membership":"%s"}' % Membership.JOIN channel = self.make_request("PUT", path, content.encode("ascii")) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) channel = self.make_request("GET", path, content=b"") - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) expected_response = {"membership": Membership.JOIN} self.assertEqual(expected_response, channel.json_body) @@ -998,10 +1025,10 @@ def test_rooms_members_other(self) -> None: # valid invite message content = '{"membership":"%s"}' % Membership.INVITE channel = self.make_request("PUT", path, content) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) channel = self.make_request("GET", path, content=b"") - 
self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assertEqual(json.loads(content), channel.json_body) def test_rooms_members_other_custom_keys(self) -> None: @@ -1017,10 +1044,10 @@ def test_rooms_members_other_custom_keys(self) -> None: "Join us!", ) channel = self.make_request("PUT", path, content) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) channel = self.make_request("GET", path, content=b"") - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) self.assertEqual(json.loads(content), channel.json_body) @@ -1137,7 +1164,9 @@ async def user_may_join_room( # Now make the callback deny all room joins, and check that a join actually fails. return_value = False - self.helper.join(self.room3, self.user2, expect_code=403, tok=self.tok2) + self.helper.join( + self.room3, self.user2, expect_code=HTTPStatus.FORBIDDEN, tok=self.tok2 + ) def test_spam_checker_may_join_room(self) -> None: """Tests that the user_may_join_room spam checker callback is correctly called @@ -1205,7 +1234,7 @@ async def user_may_join_room( self.helper.join( self.room3, self.user2, - expect_code=403, + expect_code=HTTPStatus.FORBIDDEN, expect_errcode=return_value, tok=self.tok2, ) @@ -1216,7 +1245,7 @@ async def user_may_join_room( self.helper.join( self.room3, self.user2, - expect_code=403, + expect_code=HTTPStatus.FORBIDDEN, expect_errcode=return_value[0], tok=self.tok2, expect_additional_fields=return_value[1], @@ -1270,7 +1299,7 @@ def test_join_local_ratelimit_profile_change(self) -> None: # Update the display name for the user. path = "/_matrix/client/r0/profile/%s/displayname" % self.user_id channel = self.make_request("PUT", path, {"displayname": "John Doe"}) - self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) # Check that all the rooms have been sent a profile update into. 
for room_id in room_ids: @@ -1335,71 +1364,93 @@ def test_invalid_puts(self) -> None: path = "/rooms/%s/send/m.room.message/mid1" % (urlparse.quote(self.room_id)) # missing keys or invalid json channel = self.make_request("PUT", path, b"{}") - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", path, b'{"_name":"bo"}') - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", path, b'{"nao') - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", path, b'[{"_name":"bo"},{"_name":"jill"}]') - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", path, b"text only") - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) channel = self.make_request("PUT", path, b"") - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) def test_rooms_messages_sent(self) -> None: path = "/rooms/%s/send/m.room.message/mid1" % (urlparse.quote(self.room_id)) content = b'{"body":"test","msgtype":{"type":"a"}}' channel = self.make_request("PUT", path, content) - self.assertEqual(400, channel.code, msg=channel.result["body"]) + self.assertEqual( + HTTPStatus.BAD_REQUEST, channel.code, msg=channel.result["body"] + ) # custom message types content = b'{"body":"test","msgtype":"test.custom.text"}' channel = self.make_request("PUT", path, content) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) # m.text message type path = "/rooms/%s/send/m.room.message/mid2" % (urlparse.quote(self.room_id)) content = b'{"body":"test2","msgtype":"m.text"}' channel = self.make_request("PUT", path, content) - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) @parameterized.expand( [ # Allow param( - name="NOT_SPAM", value="NOT_SPAM", expected_code=200, expected_fields={} + name="NOT_SPAM", + value="NOT_SPAM", + expected_code=HTTPStatus.OK, + expected_fields={}, + ), + param( + name="False", + value=False, + expected_code=HTTPStatus.OK, + expected_fields={}, ), - param(name="False", value=False, expected_code=200, expected_fields={}), # Block param( name="scalene string", value="ANY OTHER STRING", - expected_code=403, + expected_code=HTTPStatus.FORBIDDEN, expected_fields={"errcode": "M_FORBIDDEN"}, ), param( name="True", value=True, - expected_code=403, + expected_code=HTTPStatus.FORBIDDEN, expected_fields={"errcode": "M_FORBIDDEN"}, ), param( name="Code", value=Codes.LIMIT_EXCEEDED, - expected_code=403, + expected_code=HTTPStatus.FORBIDDEN, expected_fields={"errcode": "M_LIMIT_EXCEEDED"}, ), param( name="Tuple", value=(Codes.SERVER_NOT_TRUSTED, {"additional_field": "12345"}), - expected_code=403, + expected_code=HTTPStatus.FORBIDDEN, expected_fields={ "errcode": "M_SERVER_NOT_TRUSTED", "additional_field": "12345", @@ 
-1584,7 +1635,7 @@ def test_creator_can_post_state_event(self) -> None: channel = self.make_request("PUT", path, "{}") # Then I am allowed - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) def test_normal_user_can_not_post_state_event(self) -> None: # Given I am a normal member of a room @@ -1598,7 +1649,7 @@ def test_normal_user_can_not_post_state_event(self) -> None: channel = self.make_request("PUT", path, "{}") # Then I am not allowed because state events require PL>=50 - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) self.assertEqual( "You don't have permission to post that to the room. " "user_level (0) < send_level (50)", @@ -1625,7 +1676,7 @@ def test_with_config_override_normal_user_can_post_state_event(self) -> None: channel = self.make_request("PUT", path, "{}") # Then I am allowed - self.assertEqual(200, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.result["body"]) @unittest.override_config( { @@ -1653,7 +1704,7 @@ def test_any_room_override_defeats_config_override(self) -> None: channel = self.make_request("PUT", path, "{}") # Then I am not allowed - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) @unittest.override_config( { @@ -1681,7 +1732,7 @@ def test_specific_room_override_defeats_config_override(self) -> None: channel = self.make_request("PUT", path, "{}") # Then I am not allowed - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) self.assertEqual( "You don't have permission to post that to the room. " + "user_level (0) < send_level (1)", @@ -1712,7 +1763,7 @@ def test_config_override_applies_only_to_specific_preset(self) -> None: # Then I am not allowed because the public_chat config does not # affect this room, because this room is a private_chat - self.assertEqual(403, channel.code, msg=channel.result["body"]) + self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.result["body"]) self.assertEqual( "You don't have permission to post that to the room. 
" + "user_level (0) < send_level (50)", @@ -1731,7 +1782,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: def test_initial_sync(self) -> None: channel = self.make_request("GET", "/rooms/%s/initialSync" % self.room_id) - self.assertEqual(200, channel.code) + self.assertEqual(HTTPStatus.OK, channel.code) self.assertEqual(self.room_id, channel.json_body["room_id"]) self.assertEqual("join", channel.json_body["membership"]) @@ -1774,7 +1825,7 @@ def test_topo_token_is_accepted(self) -> None: channel = self.make_request( "GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token) ) - self.assertEqual(200, channel.code) + self.assertEqual(HTTPStatus.OK, channel.code) self.assertTrue("start" in channel.json_body) self.assertEqual(token, channel.json_body["start"]) self.assertTrue("chunk" in channel.json_body) @@ -1785,7 +1836,7 @@ def test_stream_token_is_accepted_for_fwd_pagianation(self) -> None: channel = self.make_request( "GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token) ) - self.assertEqual(200, channel.code) + self.assertEqual(HTTPStatus.OK, channel.code) self.assertTrue("start" in channel.json_body) self.assertEqual(token, channel.json_body["start"]) self.assertTrue("chunk" in channel.json_body) @@ -1824,7 +1875,7 @@ def test_room_messages_purge(self) -> None: json.dumps({"types": [EventTypes.Message]}), ), ) - self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) chunk = channel.json_body["chunk"] self.assertEqual(len(chunk), 2, [event["content"] for event in chunk]) @@ -1852,7 +1903,7 @@ def test_room_messages_purge(self) -> None: json.dumps({"types": [EventTypes.Message]}), ), ) - self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) chunk = channel.json_body["chunk"] self.assertEqual(len(chunk), 1, [event["content"] for event in chunk]) @@ -1869,7 +1920,7 @@ def test_room_messages_purge(self) -> None: json.dumps({"types": [EventTypes.Message]}), ), ) - self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) chunk = channel.json_body["chunk"] self.assertEqual(len(chunk), 0, [event["content"] for event in chunk]) @@ -1997,14 +2048,14 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: def test_restricted_no_auth(self) -> None: channel = self.make_request("GET", self.url) - self.assertEqual(channel.code, 401, channel.result) + self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result) def test_restricted_auth(self) -> None: self.register_user("user", "pass") tok = self.login("user", "pass") channel = self.make_request("GET", self.url, access_token=tok) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) class PublicRoomsRoomTypeFilterTestCase(unittest.HomeserverTestCase): @@ -2123,7 +2174,7 @@ def test_simple(self) -> None: content={"filter": search_filter}, access_token=self.token, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self.federation_client.get_public_rooms.assert_called_once_with( # type: ignore[attr-defined] "testserv", @@ -2140,7 +2191,7 @@ def test_fallback(self) -> None: # The `get_public_rooms` should be called again if the first call fails # with a 404, when using search filters. 
self.federation_client.get_public_rooms.side_effect = ( # type: ignore[attr-defined] - HttpResponseException(404, "Not Found", b""), + HttpResponseException(HTTPStatus.NOT_FOUND, "Not Found", b""), make_awaitable({}), ) @@ -2152,7 +2203,7 @@ def test_fallback(self) -> None: content={"filter": search_filter}, access_token=self.token, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self.federation_client.get_public_rooms.assert_has_calls( # type: ignore[attr-defined] [ @@ -2198,21 +2249,19 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # Set a profile for the test user self.displayname = "test user" - data = {"displayname": self.displayname} - request_data = json.dumps(data) + request_data = {"displayname": self.displayname} channel = self.make_request( "PUT", "/_matrix/client/r0/profile/%s/displayname" % (self.user_id,), request_data, access_token=self.tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) def test_per_room_profile_forbidden(self) -> None: - data = {"membership": "join", "displayname": "other test user"} - request_data = json.dumps(data) + request_data = {"membership": "join", "displayname": "other test user"} channel = self.make_request( "PUT", "/_matrix/client/r0/rooms/%s/state/m.room.member/%s" @@ -2220,7 +2269,7 @@ def test_per_room_profile_forbidden(self) -> None: request_data, access_token=self.tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) event_id = channel.json_body["event_id"] channel = self.make_request( @@ -2228,7 +2277,7 @@ def test_per_room_profile_forbidden(self) -> None: "/_matrix/client/r0/rooms/%s/event/%s" % (self.room_id, event_id), access_token=self.tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) res_displayname = channel.json_body["content"]["displayname"] self.assertEqual(res_displayname, self.displayname, channel.result) @@ -2262,7 +2311,7 @@ def test_join_reason(self) -> None: content={"reason": reason}, access_token=self.second_tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self._check_for_reason(reason) @@ -2276,7 +2325,7 @@ def test_leave_reason(self) -> None: content={"reason": reason}, access_token=self.second_tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self._check_for_reason(reason) @@ -2290,7 +2339,7 @@ def test_kick_reason(self) -> None: content={"reason": reason, "user_id": self.second_user_id}, access_token=self.second_tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self._check_for_reason(reason) @@ -2304,7 +2353,7 @@ def test_ban_reason(self) -> None: content={"reason": reason, "user_id": self.second_user_id}, access_token=self.creator_tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self._check_for_reason(reason) @@ -2316,7 +2365,7 @@ def test_unban_reason(self) -> None: content={"reason": reason, "user_id": self.second_user_id}, access_token=self.creator_tok, ) - self.assertEqual(channel.code, 200, channel.result) + 
self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self._check_for_reason(reason) @@ -2328,7 +2377,7 @@ def test_invite_reason(self) -> None: content={"reason": reason, "user_id": self.second_user_id}, access_token=self.creator_tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self._check_for_reason(reason) @@ -2347,7 +2396,7 @@ def test_reject_invite_reason(self) -> None: content={"reason": reason}, access_token=self.second_tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) self._check_for_reason(reason) @@ -2359,7 +2408,7 @@ def _check_for_reason(self, reason: str) -> None: ), access_token=self.creator_tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) event_content = channel.json_body @@ -2407,7 +2456,7 @@ def test_context_filter_labels(self) -> None: % (self.room_id, event_id, json.dumps(self.FILTER_LABELS)), access_token=self.tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) events_before = channel.json_body["events_before"] @@ -2437,7 +2486,7 @@ def test_context_filter_not_labels(self) -> None: % (self.room_id, event_id, json.dumps(self.FILTER_NOT_LABELS)), access_token=self.tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) events_before = channel.json_body["events_before"] @@ -2472,7 +2521,7 @@ def test_context_filter_labels_not_labels(self) -> None: % (self.room_id, event_id, json.dumps(self.FILTER_LABELS_NOT_LABELS)), access_token=self.tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) events_before = channel.json_body["events_before"] @@ -2552,16 +2601,14 @@ def test_messages_filter_labels_not_labels(self) -> None: def test_search_filter_labels(self) -> None: """Test that we can filter by a label on a /search request.""" - request_data = json.dumps( - { - "search_categories": { - "room_events": { - "search_term": "label", - "filter": self.FILTER_LABELS, - } + request_data = { + "search_categories": { + "room_events": { + "search_term": "label", + "filter": self.FILTER_LABELS, } } - ) + } self._send_labelled_messages_in_room() @@ -2589,16 +2636,14 @@ def test_search_filter_labels(self) -> None: def test_search_filter_not_labels(self) -> None: """Test that we can filter by the absence of a label on a /search request.""" - request_data = json.dumps( - { - "search_categories": { - "room_events": { - "search_term": "label", - "filter": self.FILTER_NOT_LABELS, - } + request_data = { + "search_categories": { + "room_events": { + "search_term": "label", + "filter": self.FILTER_NOT_LABELS, } } - ) + } self._send_labelled_messages_in_room() @@ -2638,16 +2683,14 @@ def test_search_filter_labels_not_labels(self) -> None: """Test that we can filter by both a label and the absence of another label on a /search request. 
""" - request_data = json.dumps( - { - "search_categories": { - "room_events": { - "search_term": "label", - "filter": self.FILTER_LABELS_NOT_LABELS, - } + request_data = { + "search_categories": { + "room_events": { + "search_term": "label", + "filter": self.FILTER_LABELS_NOT_LABELS, } } - ) + } self._send_labelled_messages_in_room() @@ -2820,7 +2863,7 @@ def _filter_messages(self, filter: JsonDict) -> List[JsonDict]: "/rooms/%s/messages?filter=%s&dir=b" % (self.room_id, json.dumps(filter)), access_token=self.tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) return channel.json_body["chunk"] @@ -2925,7 +2968,7 @@ def test_erased_sender(self) -> None: % (self.room_id, event_id), access_token=self.tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) events_before = channel.json_body["events_before"] @@ -2991,7 +3034,7 @@ def test_erased_sender(self) -> None: % (self.room_id, event_id), access_token=invited_tok, ) - self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) events_before = channel.json_body["events_before"] @@ -3092,8 +3135,7 @@ def _random_alias(self) -> str: def _set_alias_via_directory(self, alias: str, expected_code: int = 200) -> None: url = "/_matrix/client/r0/directory/room/" + alias - data = {"room_id": self.room_id} - request_data = json.dumps(data) + request_data = {"room_id": self.room_id} channel = self.make_request( "PUT", url, request_data, access_token=self.room_owner_tok @@ -3122,8 +3164,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: def _set_alias_via_directory(self, alias: str, expected_code: int = 200) -> None: url = "/_matrix/client/r0/directory/room/" + alias - data = {"room_id": self.room_id} - request_data = json.dumps(data) + request_data = {"room_id": self.room_id} channel = self.make_request( "PUT", url, request_data, access_token=self.room_owner_tok @@ -3149,7 +3190,7 @@ def _set_canonical_alias( channel = self.make_request( "PUT", "rooms/%s/state/m.room.canonical_alias" % (self.room_id,), - json.dumps(content), + content, access_token=self.room_owner_tok, ) self.assertEqual(channel.code, expected_code, channel.result) @@ -3283,7 +3324,7 @@ def test_threepid_invite_spamcheck_deprecated(self) -> None: # Mock a few functions to prevent the test from failing due to failing to talk to # a remote IS. We keep the mock for make_and_store_3pid_invite around so we # can check its call_count later on during the test. - make_invite_mock = Mock(return_value=make_awaitable(0)) + make_invite_mock = Mock(return_value=make_awaitable((Mock(event_id="abc"), 0))) self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock self.hs.get_identity_handler().lookup_3pid = Mock( return_value=make_awaitable(None), @@ -3344,7 +3385,7 @@ def test_threepid_invite_spamcheck(self) -> None: # Mock a few functions to prevent the test from failing due to failing to talk to # a remote IS. We keep the mock for make_and_store_3pid_invite around so we # can check its call_count later on during the test. 
- make_invite_mock = Mock(return_value=make_awaitable(0)) + make_invite_mock = Mock(return_value=make_awaitable((Mock(event_id="abc"), 0))) self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock self.hs.get_identity_handler().lookup_3pid = Mock( return_value=make_awaitable(None), diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index e3efd1f1b096..b085c50356aa 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -606,11 +606,10 @@ def test_unread_counts(self) -> None: self._check_unread_count(1) # Send a read receipt to tell the server we've read the latest event. - body = json.dumps({ReceiptTypes.READ: res["event_id"]}).encode("utf8") channel = self.make_request( "POST", f"/rooms/{self.room_id}/read_markers", - body, + {ReceiptTypes.READ: res["event_id"]}, access_token=self.tok, ) self.assertEqual(channel.code, 200, channel.json_body) diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index 5eb0f243f747..9a48e9286fe4 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -21,7 +21,6 @@ from synapse.api.errors import SynapseError from synapse.api.room_versions import RoomVersion from synapse.events import EventBase -from synapse.events.snapshot import EventContext from synapse.events.third_party_rules import load_legacy_third_party_event_rules from synapse.rest import admin from synapse.rest.client import account, login, profile, room @@ -113,14 +112,8 @@ async def approve_all_signature_checking( # Have this homeserver skip event auth checks. This is necessary due to # event auth checks ensuring that events were signed by the sender's homeserver. - async def _check_event_auth( - origin: str, - event: EventBase, - context: EventContext, - *args: Any, - **kwargs: Any, - ) -> EventContext: - return context + async def _check_event_auth(origin: Any, event: Any, context: Any) -> None: + pass hs.get_federation_event_handler()._check_event_auth = _check_event_auth # type: ignore[assignment] diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py index 93f749744d0a..105d4186988b 100644 --- a/tests/rest/client/utils.py +++ b/tests/rest/client/utils.py @@ -136,7 +136,7 @@ def create_room_as( self.site, "POST", path, - json.dumps(content).encode("utf8"), + content, custom_headers=custom_headers, ) @@ -210,7 +210,7 @@ def knock( self.site, "POST", path, - json.dumps(data).encode("utf8"), + data, ) assert ( @@ -309,7 +309,7 @@ def change_membership( self.site, "PUT", path, - json.dumps(data).encode("utf8"), + data, ) assert ( @@ -392,7 +392,7 @@ def send_event( self.site, "PUT", path, - json.dumps(content or {}).encode("utf8"), + content or {}, custom_headers=custom_headers, ) diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py index 79727c430fe5..d18fc13c21d6 100644 --- a/tests/rest/media/v1/test_media_storage.py +++ b/tests/rest/media/v1/test_media_storage.py @@ -126,7 +126,9 @@ class _TestImage: expected_scaled: The expected bytes from scaled thumbnailing, or None if test should just check for a valid image returned. expected_found: True if the file should exist on the server, or False if - a 404 is expected. + a 404/400 is expected. + unable_to_thumbnail: True if we expect the thumbnailing to fail (400), or + False if the thumbnailing should succeed or a normal 404 is expected. 
""" data: bytes @@ -135,6 +137,7 @@ class _TestImage: expected_cropped: Optional[bytes] = None expected_scaled: Optional[bytes] = None expected_found: bool = True + unable_to_thumbnail: bool = False @parameterized_class( @@ -192,6 +195,7 @@ class _TestImage: b"image/gif", b".gif", expected_found=False, + unable_to_thumbnail=True, ), ), ], @@ -366,18 +370,29 @@ def test_disposition_none(self) -> None: def test_thumbnail_crop(self) -> None: """Test that a cropped remote thumbnail is available.""" self._test_thumbnail( - "crop", self.test_image.expected_cropped, self.test_image.expected_found + "crop", + self.test_image.expected_cropped, + expected_found=self.test_image.expected_found, + unable_to_thumbnail=self.test_image.unable_to_thumbnail, ) def test_thumbnail_scale(self) -> None: """Test that a scaled remote thumbnail is available.""" self._test_thumbnail( - "scale", self.test_image.expected_scaled, self.test_image.expected_found + "scale", + self.test_image.expected_scaled, + expected_found=self.test_image.expected_found, + unable_to_thumbnail=self.test_image.unable_to_thumbnail, ) def test_invalid_type(self) -> None: """An invalid thumbnail type is never available.""" - self._test_thumbnail("invalid", None, False) + self._test_thumbnail( + "invalid", + None, + expected_found=False, + unable_to_thumbnail=self.test_image.unable_to_thumbnail, + ) @unittest.override_config( {"thumbnail_sizes": [{"width": 32, "height": 32, "method": "scale"}]} @@ -386,7 +401,12 @@ def test_no_thumbnail_crop(self) -> None: """ Override the config to generate only scaled thumbnails, but request a cropped one. """ - self._test_thumbnail("crop", None, False) + self._test_thumbnail( + "crop", + None, + expected_found=False, + unable_to_thumbnail=self.test_image.unable_to_thumbnail, + ) @unittest.override_config( {"thumbnail_sizes": [{"width": 32, "height": 32, "method": "crop"}]} @@ -395,14 +415,22 @@ def test_no_thumbnail_scale(self) -> None: """ Override the config to generate only cropped thumbnails, but request a scaled one. """ - self._test_thumbnail("scale", None, False) + self._test_thumbnail( + "scale", + None, + expected_found=False, + unable_to_thumbnail=self.test_image.unable_to_thumbnail, + ) def test_thumbnail_repeated_thumbnail(self) -> None: """Test that fetching the same thumbnail works, and deleting the on disk thumbnail regenerates it. """ self._test_thumbnail( - "scale", self.test_image.expected_scaled, self.test_image.expected_found + "scale", + self.test_image.expected_scaled, + expected_found=self.test_image.expected_found, + unable_to_thumbnail=self.test_image.unable_to_thumbnail, ) if not self.test_image.expected_found: @@ -459,8 +487,24 @@ def test_thumbnail_repeated_thumbnail(self) -> None: ) def _test_thumbnail( - self, method: str, expected_body: Optional[bytes], expected_found: bool + self, + method: str, + expected_body: Optional[bytes], + expected_found: bool, + unable_to_thumbnail: bool = False, ) -> None: + """Test the given thumbnailing method works as expected. + + Args: + method: The thumbnailing method to use (crop, scale). + expected_body: The expected bytes from thumbnailing, or None if + test should just check for a valid image. + expected_found: True if the file should exist on the server, or False if + a 404/400 is expected. + unable_to_thumbnail: True if we expect the thumbnailing to fail (400), or + False if the thumbnailing should succeed or a normal 404 is expected. 
+ """ + params = "?width=32&height=32&method=" + method channel = make_request( self.reactor, @@ -496,6 +540,16 @@ def _test_thumbnail( else: # ensure that the result is at least some valid image Image.open(BytesIO(channel.result["body"])) + elif unable_to_thumbnail: + # A 400 with a JSON body. + self.assertEqual(channel.code, 400) + self.assertEqual( + channel.json_body, + { + "errcode": "M_UNKNOWN", + "error": "Cannot find any thumbnails for the requested media ([b'example.com', b'12345']). This might mean the media is not a supported_media_format=(image/jpeg, image/jpg, image/webp, image/gif, image/png) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)", + }, + ) else: # A 404 with a JSON body. self.assertEqual(channel.code, 404) diff --git a/tests/storage/databases/main/test_events_worker.py b/tests/storage/databases/main/test_events_worker.py index 38963ce4a74c..46d829b062a0 100644 --- a/tests/storage/databases/main/test_events_worker.py +++ b/tests/storage/databases/main/test_events_worker.py @@ -143,7 +143,7 @@ def prepare(self, reactor, clock, hs): self.event_id = res["event_id"] # Reset the event cache so the tests start with it empty - self.store._get_event_cache.clear() + self.get_success(self.store._get_event_cache.clear()) def test_simple(self): """Test that we cache events that we pull from the DB.""" @@ -160,7 +160,7 @@ def test_event_ref(self): """ # Reset the event cache - self.store._get_event_cache.clear() + self.get_success(self.store._get_event_cache.clear()) with LoggingContext("test") as ctx: # We keep hold of the event event though we never use it. @@ -170,7 +170,7 @@ def test_event_ref(self): self.assertEqual(ctx.get_resource_usage().evt_db_fetch_count, 1) # Reset the event cache - self.store._get_event_cache.clear() + self.get_success(self.store._get_event_cache.clear()) with LoggingContext("test") as ctx: self.get_success(self.store.get_event(self.event_id)) @@ -345,7 +345,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer): self.event_id = res["event_id"] # Reset the event cache so the tests start with it empty - self.store._get_event_cache.clear() + self.get_success(self.store._get_event_cache.clear()) @contextmanager def blocking_get_event_calls( diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py index e8c53f16d9c5..ba40124c8a00 100644 --- a/tests/storage/test_event_push_actions.py +++ b/tests/storage/test_event_push_actions.py @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from unittest.mock import Mock - from twisted.test.proto_helpers import MemoryReactor +from synapse.rest import admin +from synapse.rest.client import login, room from synapse.server import HomeServer from synapse.storage.databases.main.event_push_actions import NotifCounts from synapse.util import Clock @@ -24,15 +24,14 @@ USER_ID = "@user:example.com" -PlAIN_NOTIF = ["notify", {"set_tweak": "highlight", "value": False}] -HIGHLIGHT = [ - "notify", - {"set_tweak": "sound", "value": "default"}, - {"set_tweak": "highlight"}, -] - class EventPushActionsStoreTestCase(HomeserverTestCase): + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main persist_events_store = hs.get_datastores().persist_events @@ -54,154 +53,118 @@ def test_get_unread_push_actions_for_user_in_range_for_email(self) -> None: ) def test_count_aggregation(self) -> None: - room_id = "!foo:example.com" - user_id = "@user1235:test" + # Create a user to receive notifications and send receipts. + user_id = self.register_user("user1235", "pass") + token = self.login("user1235", "pass") + + # And another users to send events. + other_id = self.register_user("other", "pass") + other_token = self.login("other", "pass") + + # Create a room and put both users in it. + room_id = self.helper.create_room_as(user_id, tok=token) + self.helper.join(room_id, other_id, tok=other_token) - last_read_stream_ordering = [0] + last_event_id: str - def _assert_counts(noitf_count: int, highlight_count: int) -> None: + def _assert_counts( + noitf_count: int, unread_count: int, highlight_count: int + ) -> None: counts = self.get_success( self.store.db_pool.runInteraction( - "", - self.store._get_unread_counts_by_pos_txn, + "get-unread-counts", + self.store._get_unread_counts_by_receipt_txn, room_id, user_id, - last_read_stream_ordering[0], ) ) self.assertEqual( counts, NotifCounts( notify_count=noitf_count, - unread_count=0, # Unread counts are tested in the sync tests. 
+ unread_count=unread_count, highlight_count=highlight_count, ), ) - def _inject_actions(stream: int, action: list) -> None: - event = Mock() - event.room_id = room_id - event.event_id = f"$test{stream}:example.com" - event.internal_metadata.stream_ordering = stream - event.internal_metadata.is_outlier.return_value = False - event.depth = stream - - self.store._events_stream_cache.entity_has_changed(room_id, stream) - - self.get_success( - self.store.db_pool.simple_insert( - table="events", - values={ - "stream_ordering": stream, - "topological_ordering": stream, - "type": "m.room.message", - "room_id": room_id, - "processed": True, - "outlier": False, - "event_id": event.event_id, - }, - ) + def _create_event(highlight: bool = False) -> str: + result = self.helper.send_event( + room_id, + type="m.room.message", + content={"msgtype": "m.text", "body": user_id if highlight else "msg"}, + tok=other_token, ) + nonlocal last_event_id + last_event_id = result["event_id"] + return last_event_id - self.get_success( - self.store.add_push_actions_to_staging( - event.event_id, - {user_id: action}, - False, - ) - ) - self.get_success( - self.store.db_pool.runInteraction( - "", - self.persist_events_store._set_push_actions_for_event_and_users_txn, - [(event, None)], - [(event, None)], - ) - ) - - def _rotate(stream: int) -> None: - self.get_success( - self.store.db_pool.runInteraction( - "rotate-receipts", self.store._handle_new_receipts_for_notifs_txn - ) - ) - - self.get_success( - self.store.db_pool.runInteraction( - "rotate-notifs", self.store._rotate_notifs_before_txn, stream - ) - ) - - def _mark_read(stream: int, depth: int) -> None: - last_read_stream_ordering[0] = stream + def _rotate() -> None: + self.get_success(self.store._rotate_notifs()) + def _mark_read(event_id: str) -> None: self.get_success( self.store.insert_receipt( room_id, "m.read", user_id=user_id, - event_ids=[f"$test{stream}:example.com"], + event_ids=[event_id], data={}, ) ) - _assert_counts(0, 0) - _inject_actions(1, PlAIN_NOTIF) - _assert_counts(1, 0) - _rotate(1) - _assert_counts(1, 0) + _assert_counts(0, 0, 0) + _create_event() + _assert_counts(1, 1, 0) + _rotate() + _assert_counts(1, 1, 0) - _inject_actions(3, PlAIN_NOTIF) - _assert_counts(2, 0) - _rotate(3) - _assert_counts(2, 0) + event_id = _create_event() + _assert_counts(2, 2, 0) + _rotate() + _assert_counts(2, 2, 0) - _inject_actions(5, PlAIN_NOTIF) - _mark_read(3, 3) - _assert_counts(1, 0) + _create_event() + _mark_read(event_id) + _assert_counts(1, 1, 0) - _mark_read(5, 5) - _assert_counts(0, 0) + _mark_read(last_event_id) + _assert_counts(0, 0, 0) - _inject_actions(6, PlAIN_NOTIF) - _rotate(6) - _assert_counts(1, 0) - - self.get_success( - self.store.db_pool.simple_delete( - table="event_push_actions", keyvalues={"1": 1}, desc="" - ) - ) + _create_event() + _rotate() + _assert_counts(1, 1, 0) - _assert_counts(1, 0) + # Delete old event push actions, this should not affect the (summarised) count. + self.get_success(self.store._remove_old_push_actions_that_have_rotated()) + _assert_counts(1, 1, 0) - _mark_read(6, 6) - _assert_counts(0, 0) + _mark_read(last_event_id) + _assert_counts(0, 0, 0) - _inject_actions(8, HIGHLIGHT) - _assert_counts(1, 1) - _rotate(8) - _assert_counts(1, 1) + event_id = _create_event(True) + _assert_counts(1, 1, 1) + _rotate() + _assert_counts(1, 1, 1) # Check that adding another notification and rotating after highlight # works. 
- _inject_actions(10, PlAIN_NOTIF) - _rotate(10) - _assert_counts(2, 1) + _create_event() + _rotate() + _assert_counts(2, 2, 1) # Check that sending read receipts at different points results in the # right counts. - _mark_read(8, 8) - _assert_counts(1, 0) - _mark_read(10, 10) - _assert_counts(0, 0) - - _inject_actions(11, HIGHLIGHT) - _assert_counts(1, 1) - _mark_read(11, 11) - _assert_counts(0, 0) - _rotate(11) - _assert_counts(0, 0) + _mark_read(event_id) + _assert_counts(1, 1, 0) + _mark_read(last_event_id) + _assert_counts(0, 0, 0) + + _create_event(True) + _assert_counts(1, 1, 1) + _mark_read(last_event_id) + _assert_counts(0, 0, 0) + _rotate() + _assert_counts(0, 0, 0) def test_find_first_stream_ordering_after_ts(self) -> None: def add_event(so: int, ts: int) -> None: diff --git a/tests/storage/test_purge.py b/tests/storage/test_purge.py index 8dfaa0559b26..9c1182ed16b5 100644 --- a/tests/storage/test_purge.py +++ b/tests/storage/test_purge.py @@ -115,6 +115,6 @@ def test_purge_room(self): ) # The events aren't found. - self.store._invalidate_get_event_cache(create_event.event_id) + self.store._invalidate_local_get_event_cache(create_event.event_id) self.get_failure(self.store.get_event(create_event.event_id), NotFoundError) self.get_failure(self.store.get_event(first["event_id"]), NotFoundError) diff --git a/tests/replication/slave/storage/test_receipts.py b/tests/storage/test_receipts.py similarity index 84% rename from tests/replication/slave/storage/test_receipts.py rename to tests/storage/test_receipts.py index 19f57115a1c4..b1a8f8bba7d3 100644 --- a/tests/replication/slave/storage/test_receipts.py +++ b/tests/storage/test_receipts.py @@ -13,23 +13,21 @@ # limitations under the License. from synapse.api.constants import ReceiptTypes -from synapse.replication.slave.storage.receipts import SlavedReceiptsStore from synapse.types import UserID, create_requester from tests.test_utils.event_injection import create_event - -from ._base import BaseSlavedStoreTestCase +from tests.unittest import HomeserverTestCase OTHER_USER_ID = "@other:test" OUR_USER_ID = "@our:test" -class SlavedReceiptTestCase(BaseSlavedStoreTestCase): - - STORE_TYPE = SlavedReceiptsStore - +class ReceiptTestCase(HomeserverTestCase): def prepare(self, reactor, clock, homeserver): super().prepare(reactor, clock, homeserver) + + self.store = homeserver.get_datastores().main + self.room_creator = homeserver.get_room_creation_handler() self.persist_event_storage_controller = ( self.hs.get_storage_controllers().persistence @@ -87,14 +85,14 @@ def prepare(self, reactor, clock, homeserver): def test_return_empty_with_no_data(self): res = self.get_success( - self.master_store.get_receipts_for_user( + self.store.get_receipts_for_user( OUR_USER_ID, [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE] ) ) self.assertEqual(res, {}) res = self.get_success( - self.master_store.get_receipts_for_user_with_orderings( + self.store.get_receipts_for_user_with_orderings( OUR_USER_ID, [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE], ) @@ -102,7 +100,7 @@ def test_return_empty_with_no_data(self): self.assertEqual(res, {}) res = self.get_success( - self.master_store.get_last_receipt_event_id_for_user( + self.store.get_last_receipt_event_id_for_user( OUR_USER_ID, self.room_id1, [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE], @@ -121,20 +119,20 @@ def test_get_receipts_for_user(self): # Send public read receipt for the first event self.get_success( - self.master_store.insert_receipt( + self.store.insert_receipt( self.room_id1, ReceiptTypes.READ, 
OUR_USER_ID, [event1_1_id], {} ) ) # Send private read receipt for the second event self.get_success( - self.master_store.insert_receipt( + self.store.insert_receipt( self.room_id1, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event1_2_id], {} ) ) # Test we get the latest event when we want both private and public receipts res = self.get_success( - self.master_store.get_receipts_for_user( + self.store.get_receipts_for_user( OUR_USER_ID, [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE] ) ) @@ -142,26 +140,24 @@ def test_get_receipts_for_user(self): # Test we get the older event when we want only public receipt res = self.get_success( - self.master_store.get_receipts_for_user(OUR_USER_ID, [ReceiptTypes.READ]) + self.store.get_receipts_for_user(OUR_USER_ID, [ReceiptTypes.READ]) ) self.assertEqual(res, {self.room_id1: event1_1_id}) # Test we get the latest event when we want only the public receipt res = self.get_success( - self.master_store.get_receipts_for_user( - OUR_USER_ID, [ReceiptTypes.READ_PRIVATE] - ) + self.store.get_receipts_for_user(OUR_USER_ID, [ReceiptTypes.READ_PRIVATE]) ) self.assertEqual(res, {self.room_id1: event1_2_id}) # Test receipt updating self.get_success( - self.master_store.insert_receipt( + self.store.insert_receipt( self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_2_id], {} ) ) res = self.get_success( - self.master_store.get_receipts_for_user(OUR_USER_ID, [ReceiptTypes.READ]) + self.store.get_receipts_for_user(OUR_USER_ID, [ReceiptTypes.READ]) ) self.assertEqual(res, {self.room_id1: event1_2_id}) @@ -172,12 +168,12 @@ def test_get_receipts_for_user(self): # Test new room is reflected in what the method returns self.get_success( - self.master_store.insert_receipt( + self.store.insert_receipt( self.room_id2, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event2_1_id], {} ) ) res = self.get_success( - self.master_store.get_receipts_for_user( + self.store.get_receipts_for_user( OUR_USER_ID, [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE] ) ) @@ -194,20 +190,20 @@ def test_get_last_receipt_event_id_for_user(self): # Send public read receipt for the first event self.get_success( - self.master_store.insert_receipt( + self.store.insert_receipt( self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_1_id], {} ) ) # Send private read receipt for the second event self.get_success( - self.master_store.insert_receipt( + self.store.insert_receipt( self.room_id1, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event1_2_id], {} ) ) # Test we get the latest event when we want both private and public receipts res = self.get_success( - self.master_store.get_last_receipt_event_id_for_user( + self.store.get_last_receipt_event_id_for_user( OUR_USER_ID, self.room_id1, [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE], @@ -217,7 +213,7 @@ def test_get_last_receipt_event_id_for_user(self): # Test we get the older event when we want only public receipt res = self.get_success( - self.master_store.get_last_receipt_event_id_for_user( + self.store.get_last_receipt_event_id_for_user( OUR_USER_ID, self.room_id1, [ReceiptTypes.READ] ) ) @@ -225,7 +221,7 @@ def test_get_last_receipt_event_id_for_user(self): # Test we get the latest event when we want only the private receipt res = self.get_success( - self.master_store.get_last_receipt_event_id_for_user( + self.store.get_last_receipt_event_id_for_user( OUR_USER_ID, self.room_id1, [ReceiptTypes.READ_PRIVATE] ) ) @@ -233,12 +229,12 @@ def test_get_last_receipt_event_id_for_user(self): # Test receipt updating self.get_success( - self.master_store.insert_receipt( + 
self.store.insert_receipt( self.room_id1, ReceiptTypes.READ, OUR_USER_ID, [event1_2_id], {} ) ) res = self.get_success( - self.master_store.get_last_receipt_event_id_for_user( + self.store.get_last_receipt_event_id_for_user( OUR_USER_ID, self.room_id1, [ReceiptTypes.READ] ) ) @@ -251,12 +247,12 @@ def test_get_last_receipt_event_id_for_user(self): # Test new room is reflected in what the method returns self.get_success( - self.master_store.insert_receipt( + self.store.insert_receipt( self.room_id2, ReceiptTypes.READ_PRIVATE, OUR_USER_ID, [event2_1_id], {} ) ) res = self.get_success( - self.master_store.get_last_receipt_event_id_for_user( + self.store.get_last_receipt_event_id_for_user( OUR_USER_ID, self.room_id2, [ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE], diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index 1218786d79d8..240b02cb9f43 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -23,7 +23,6 @@ from tests import unittest from tests.server import TestHomeServer -from tests.test_utils import event_injection class RoomMemberStoreTestCase(unittest.HomeserverTestCase): @@ -110,60 +109,6 @@ def test_count_known_servers_stat_counter_enabled(self) -> None: # It now knows about Charlie's server. self.assertEqual(self.store._known_servers_count, 2) - def test_get_joined_users_from_context(self) -> None: - room = self.helper.create_room_as(self.u_alice, tok=self.t_alice) - bob_event = self.get_success( - event_injection.inject_member_event( - self.hs, room, self.u_bob, Membership.JOIN - ) - ) - - # first, create a regular event - event, context = self.get_success( - event_injection.create_event( - self.hs, - room_id=room, - sender=self.u_alice, - prev_event_ids=[bob_event.event_id], - type="m.test.1", - content={}, - ) - ) - - users = self.get_success( - self.store.get_joined_users_from_context(event, context) - ) - self.assertEqual(users.keys(), {self.u_alice, self.u_bob}) - - # Regression test for #7376: create a state event whose key matches bob's - # user_id, but which is *not* a membership event, and persist that; then check - # that `get_joined_users_from_context` returns the correct users for the next event. 
- non_member_event = self.get_success( - event_injection.inject_event( - self.hs, - room_id=room, - sender=self.u_bob, - prev_event_ids=[bob_event.event_id], - type="m.test.2", - state_key=self.u_bob, - content={}, - ) - ) - event, context = self.get_success( - event_injection.create_event( - self.hs, - room_id=room, - sender=self.u_alice, - prev_event_ids=[non_member_event.event_id], - type="m.test.3", - content={}, - ) - ) - users = self.get_success( - self.store.get_joined_users_from_context(event, context) - ) - self.assertEqual(users.keys(), {self.u_alice, self.u_bob}) - def test__null_byte_in_display_name_properly_handled(self) -> None: room = self.helper.create_room_as(self.u_alice, tok=self.t_alice) diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index 8043bdbde2c7..5564161750e6 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -369,8 +369,8 @@ def test_get_state_for_event(self): state_dict_ids = cache_entry.value self.assertEqual(cache_entry.full, False) - self.assertEqual(cache_entry.known_absent, {(e1.type, e1.state_key)}) - self.assertDictEqual(state_dict_ids, {(e1.type, e1.state_key): e1.event_id}) + self.assertEqual(cache_entry.known_absent, set()) + self.assertDictEqual(state_dict_ids, {}) ############################################ # test that things work with a partial cache @@ -387,7 +387,7 @@ def test_get_state_for_event(self): ) self.assertEqual(is_all, False) - self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict) + self.assertDictEqual({}, state_dict) room_id = self.room.to_string() (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( @@ -412,7 +412,7 @@ def test_get_state_for_event(self): ) self.assertEqual(is_all, False) - self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict) + self.assertDictEqual({}, state_dict) (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_members_cache, @@ -443,7 +443,7 @@ def test_get_state_for_event(self): ) self.assertEqual(is_all, False) - self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict) + self.assertDictEqual({}, state_dict) (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_members_cache, diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index 371cd201af12..e42d7b9ba080 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -19,7 +19,7 @@ from synapse import event_auth from synapse.api.constants import EventContentFields -from synapse.api.errors import AuthError +from synapse.api.errors import AuthError, SynapseError from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions from synapse.events import EventBase, make_event_from_dict from synapse.storage.databases.main.events_worker import EventRedactBehaviour @@ -689,6 +689,45 @@ def test_join_rules_msc3083_restricted(self) -> None: auth_events.values(), ) + def test_room_v10_rejects_string_power_levels(self) -> None: + pl_event_content = {"users_default": "42"} + pl_event = make_event_from_dict( + { + "room_id": TEST_ROOM_ID, + **_maybe_get_event_id_dict_for_room_version(RoomVersions.V10), + "type": "m.room.power_levels", + "sender": "@test:test.com", + "state_key": "", + "content": pl_event_content, + "signatures": {"test.com": {"ed25519:0": "some9signature"}}, + }, + room_version=RoomVersions.V10, + ) + + pl_event2_content = {"events": {"m.room.name": "42", 
"m.room.power_levels": 42}} + pl_event2 = make_event_from_dict( + { + "room_id": TEST_ROOM_ID, + **_maybe_get_event_id_dict_for_room_version(RoomVersions.V10), + "type": "m.room.power_levels", + "sender": "@test:test.com", + "state_key": "", + "content": pl_event2_content, + "signatures": {"test.com": {"ed25519:0": "some9signature"}}, + }, + room_version=RoomVersions.V10, + ) + + with self.assertRaises(SynapseError): + event_auth._check_power_levels( + pl_event.room_version, pl_event, {("fake_type", "fake_key"): pl_event2} + ) + + with self.assertRaises(SynapseError): + event_auth._check_power_levels( + pl_event.room_version, pl_event2, {("fake_type", "fake_key"): pl_event} + ) + # helpers for making events TEST_DOMAIN = "example.com" diff --git a/tests/test_federation.py b/tests/test_federation.py index 0cbef70bfa57..779fad1f6398 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -81,12 +81,8 @@ def setUp(self): self.handler = self.homeserver.get_federation_handler() federation_event_handler = self.homeserver.get_federation_event_handler() - async def _check_event_auth( - origin, - event, - context, - ): - return context + async def _check_event_auth(origin, event, context): + pass federation_event_handler._check_event_auth = _check_event_auth self.client = self.homeserver.get_federation_client() diff --git a/tests/test_server.py b/tests/test_server.py index fc4bce899c21..2fe4411401f0 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -231,7 +231,7 @@ def _make_request(self, method: bytes, path: bytes) -> FakeChannel: parse_listener_def({"type": "http", "port": 0}), self.resource, "1.0", - max_request_body_size=1234, + max_request_body_size=4096, reactor=self.reactor, ) diff --git a/tests/test_state.py b/tests/test_state.py index 6ca8d8f21d66..bafd6d1750ce 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -21,7 +21,7 @@ from synapse.api.room_versions import RoomVersions from synapse.events import make_event_from_dict from synapse.events.snapshot import EventContext -from synapse.state import StateHandler, StateResolutionHandler +from synapse.state import StateHandler, StateResolutionHandler, _make_state_cache_entry from synapse.util import Clock from synapse.util.macaroons import MacaroonGenerator @@ -99,6 +99,10 @@ async def store_state_group( state_group = self._next_group self._next_group += 1 + if current_state_ids is None: + current_state_ids = dict(self._group_to_state[prev_group]) + current_state_ids.update(delta_ids) + self._group_to_state[state_group] = dict(current_state_ids) return state_group @@ -760,3 +764,43 @@ def _get_context( result = yield defer.ensureDeferred(self.state.compute_event_context(event)) return result + + def test_make_state_cache_entry(self): + "Test that calculating a prev_group and delta is correct" + + new_state = { + ("a", ""): "E", + ("b", ""): "E", + ("c", ""): "E", + ("d", ""): "E", + } + + # old_state_1 has fewer differences to new_state than old_state_2, but + # the delta involves deleting a key, which isn't allowed in the deltas, + # so we should pick old_state_2 as the prev_group. 
+ + # `old_state_1` has two differences: `a` and `e` + old_state_1 = { + ("a", ""): "F", + ("b", ""): "E", + ("c", ""): "E", + ("d", ""): "E", + ("e", ""): "E", + } + + # `old_state_2` has three differences: `a`, `c` and `d` + old_state_2 = { + ("a", ""): "F", + ("b", ""): "E", + ("c", ""): "F", + ("d", ""): "F", + } + + entry = _make_state_cache_entry(new_state, {1: old_state_1, 2: old_state_2}) + + self.assertEqual(entry.prev_group, 2) + + # There are three changes from `old_state_2` to `new_state` + self.assertEqual( + entry.delta_ids, {("a", ""): "E", ("c", ""): "E", ("d", ""): "E"} + ) diff --git a/tests/test_terms_auth.py b/tests/test_terms_auth.py index 37fada5c5369..d3c13cf14c28 100644 --- a/tests/test_terms_auth.py +++ b/tests/test_terms_auth.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json from unittest.mock import Mock from twisted.test.proto_helpers import MemoryReactorClock @@ -51,7 +50,7 @@ def prepare(self, reactor, clock, hs): def test_ui_auth(self): # Do a UI auth request - request_data = json.dumps({"username": "kermit", "password": "monkey"}) + request_data = {"username": "kermit", "password": "monkey"} channel = self.make_request(b"POST", self.url, request_data) self.assertEqual(channel.result["code"], b"401", channel.result) @@ -82,16 +81,14 @@ def test_ui_auth(self): self.assertDictContainsSubset(channel.json_body["params"], expected_params) # We have to complete the dummy auth stage before completing the terms stage - request_data = json.dumps( - { - "username": "kermit", - "password": "monkey", - "auth": { - "session": channel.json_body["session"], - "type": "m.login.dummy", - }, - } - ) + request_data = { + "username": "kermit", + "password": "monkey", + "auth": { + "session": channel.json_body["session"], + "type": "m.login.dummy", + }, + } self.registration_handler.check_username = Mock(return_value=True) @@ -102,16 +99,14 @@ def test_ui_auth(self): self.assertEqual(channel.result["code"], b"401", channel.result) # Finish the UI auth for terms - request_data = json.dumps( - { - "username": "kermit", - "password": "monkey", - "auth": { - "session": channel.json_body["session"], - "type": "m.login.terms", - }, - } - ) + request_data = { + "username": "kermit", + "password": "monkey", + "auth": { + "session": channel.json_body["session"], + "type": "m.login.terms", + }, + } channel = self.make_request(b"POST", self.url, request_data) # We're interested in getting a response that looks like a successful diff --git a/tests/test_visibility.py b/tests/test_visibility.py index f338af6c36d0..c385b2f8d479 100644 --- a/tests/test_visibility.py +++ b/tests/test_visibility.py @@ -272,7 +272,7 @@ def test_out_of_band_invite_rejection(self): "state_key": "@user:test", "content": {"membership": "invite"}, } - self.add_hashes_and_signatures(invite_pdu) + self.add_hashes_and_signatures_from_other_server(invite_pdu) invite_event_id = make_event_from_dict(invite_pdu, RoomVersions.V9).event_id self.get_success( diff --git a/tests/unittest.py b/tests/unittest.py index c645dd35630a..66ce92f4a6e4 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -16,7 +16,6 @@ import gc import hashlib import hmac -import json import logging import secrets import time @@ -285,7 +284,7 @@ def setUp(self): config=self.hs.config.server.listeners[0], resource=self.resource, server_version_string="1", - max_request_body_size=1234, + max_request_body_size=4096, reactor=self.reactor, ) @@ -619,20 +618,16 @@ def 
register_user( want_mac.update(nonce.encode("ascii") + b"\x00" + nonce_str) want_mac_digest = want_mac.hexdigest() - body = json.dumps( - { - "nonce": nonce, - "username": username, - "displayname": displayname, - "password": password, - "admin": admin, - "mac": want_mac_digest, - "inhibit_login": True, - } - ) - channel = self.make_request( - "POST", "/_synapse/admin/v1/register", body.encode("utf8") - ) + body = { + "nonce": nonce, + "username": username, + "displayname": displayname, + "password": password, + "admin": admin, + "mac": want_mac_digest, + "inhibit_login": True, + } + channel = self.make_request("POST", "/_synapse/admin/v1/register", body) self.assertEqual(channel.code, 200, channel.json_body) user_id = channel.json_body["user_id"] @@ -676,9 +671,7 @@ def login( custom_headers: Optional[Iterable[CustomHeaderType]] = None, ) -> str: """ - Log in a user, and get an access token. Requires the Login API be - registered. - + Log in a user, and get an access token. Requires the Login API be registered. """ body = {"type": "m.login.password", "user": username, "password": password} if device_id: @@ -687,7 +680,7 @@ def login( channel = self.make_request( "POST", "/_matrix/client/r0/login", - json.dumps(body).encode("utf8"), + body, custom_headers=custom_headers, ) self.assertEqual(channel.code, 200, channel.result) @@ -780,7 +773,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer): verify_key_id, FetchKeyResult( verify_key=verify_key, - valid_until_ts=clock.time_msec() + 1000, + valid_until_ts=clock.time_msec() + 10000, ), ) ], @@ -838,7 +831,7 @@ def make_signed_federation_request( client_ip=client_ip, ) - def add_hashes_and_signatures( + def add_hashes_and_signatures_from_other_server( self, event_dict: JsonDict, room_version: RoomVersion = KNOWN_ROOM_VERSIONS[DEFAULT_ROOM_VERSION], diff --git a/tests/util/test_dict_cache.py b/tests/util/test_dict_cache.py index bee66dee4328..e8b6246ab552 100644 --- a/tests/util/test_dict_cache.py +++ b/tests/util/test_dict_cache.py @@ -20,7 +20,7 @@ class DictCacheTestCase(unittest.TestCase): def setUp(self): - self.cache = DictionaryCache("foobar") + self.cache = DictionaryCache("foobar", max_entries=10) def test_simple_cache_hit_full(self): key = "test_simple_cache_hit_full" @@ -76,13 +76,13 @@ def test_multi_insert(self): seq = self.cache.sequence test_value_1 = {"test": "test_simple_cache_hit_miss_partial"} - self.cache.update(seq, key, test_value_1, fetched_keys=set("test")) + self.cache.update(seq, key, test_value_1, fetched_keys={"test"}) seq = self.cache.sequence test_value_2 = {"test2": "test_simple_cache_hit_miss_partial2"} - self.cache.update(seq, key, test_value_2, fetched_keys=set("test2")) + self.cache.update(seq, key, test_value_2, fetched_keys={"test2"}) - c = self.cache.get(key) + c = self.cache.get(key, dict_keys=["test", "test2"]) self.assertEqual( { "test": "test_simple_cache_hit_miss_partial", @@ -90,3 +90,30 @@ def test_multi_insert(self): }, c.value, ) + self.assertEqual(c.full, False) + + def test_invalidation(self): + """Test that the partial dict and full dicts get invalidated + separately. + """ + key = "some_key" + + seq = self.cache.sequence + # start by populating a "full dict" entry + self.cache.update(seq, key, {"a": "b", "c": "d"}) + + # add a bunch of individual entries, also keeping the individual + # entry for "a" warm. + for i in range(20): + self.cache.get(key, ["a"]) + self.cache.update(seq, f"key{i}", {1: 2}) + + # We should have evicted the full dict... 
+ r = self.cache.get(key) + self.assertFalse(r.full) + self.assertTrue("c" not in r.value) + + # ... but kept the "a" entry that we kept querying. + r = self.cache.get(key, dict_keys=["a"]) + self.assertFalse(r.full) + self.assertEqual(r.value, {"a": "b"}) diff --git a/tests/utils.py b/tests/utils.py index 424cc4c2a0d6..d2c6d1e85242 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -167,6 +167,7 @@ def default_config( "local": {"per_second": 10000, "burst_count": 10000}, "remote": {"per_second": 10000, "burst_count": 10000}, }, + "rc_joins_per_room": {"per_second": 10000, "burst_count": 10000}, "rc_invites": { "per_room": {"per_second": 10000, "burst_count": 10000}, "per_user": {"per_second": 10000, "burst_count": 10000},
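
The `tests/util/test_dict_cache.py` changes above hinge on `DictionaryCache` keeping per-key ("partial") entries separate from the single full-dict entry, so the full dict can be evicted under `max_entries` pressure while a frequently-read key survives. A minimal sketch of that behaviour, mirroring the new `test_invalidation` test; only the constructor and method signatures are taken from the hunks above, while the import path and the exact eviction timing are assumptions:

    # Sketch based on the test above; import path assumed, not shown in the diff.
    from synapse.util.caches.dictionary_cache import DictionaryCache

    cache = DictionaryCache("example", max_entries=10)

    seq = cache.sequence
    cache.update(seq, "some_key", {"a": "b", "c": "d"})  # populate the full-dict entry

    # Keep the "a" entry warm while churning through other keys, so the full
    # dict gets evicted but the per-key entry for "a" is retained.
    for i in range(20):
        cache.get("some_key", ["a"])
        cache.update(seq, f"key{i}", {1: 2})

    r = cache.get("some_key")
    print(r.full)            # False: the full dict has been evicted

    r = cache.get("some_key", dict_keys=["a"])
    print(r.full, r.value)   # False {'a': 'b'}: the warm per-key entry survived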