diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 6fb6b81deb..b1dd8a75e7 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -87,7 +87,7 @@ jobs:
- name: Build and push by digest
id: build
- uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
+ uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6.19.2
with:
push: true
labels: |
diff --git a/.github/workflows/famedly-tests.yml b/.github/workflows/famedly-tests.yml
index d4386b9c21..fe6dd2a777 100644
--- a/.github/workflows/famedly-tests.yml
+++ b/.github/workflows/famedly-tests.yml
@@ -26,7 +26,7 @@ jobs:
- uses: matrix-org/setup-python-poetry@v2
with:
python-version: "3.13"
- poetry-version: "2.1.1"
+ poetry-version: "2.2.0"
extras: "all"
- run: poetry run scripts-dev/generate_sample_config.sh --check
- run: poetry run scripts-dev/config-lint.sh
@@ -62,7 +62,7 @@ jobs:
- name: Setup Poetry
uses: matrix-org/setup-python-poetry@v2
with:
- poetry-version: "2.1.1"
+ poetry-version: "2.2.0"
python-version: "3.13"
install-project: "false"
@@ -93,7 +93,7 @@ jobs:
# To make CI green, err towards caution and install the project.
install-project: "true"
python-version: "3.13"
- poetry-version: "2.1.1"
+ poetry-version: "2.2.0"
# Cribbed from
# https://github.com/AustinScola/mypy-cache-github-action/blob/85ea4f2972abed39b33bd02c36e341b28ca59213/src/restore.ts#L10-L17
@@ -223,7 +223,7 @@ jobs:
- uses: matrix-org/setup-python-poetry@v2
with:
python-version: ${{ matrix.job.python-version }}
- poetry-version: "2.1.1"
+ poetry-version: "2.2.0"
extras: ${{ matrix.job.extras }}
- name: Await PostgreSQL
if: ${{ matrix.job.postgres-version }}
diff --git a/.github/workflows/fix_lint.yaml b/.github/workflows/fix_lint.yaml
index babc3bc5de..4752b6afeb 100644
--- a/.github/workflows/fix_lint.yaml
+++ b/.github/workflows/fix_lint.yaml
@@ -31,7 +31,7 @@ jobs:
uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
install-project: "false"
- poetry-version: "2.1.1"
+ poetry-version: "2.2.1"
- name: Run ruff check
continue-on-error: true
diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml
index 5bc78062cd..2d945c2096 100644
--- a/.github/workflows/latest_deps.yml
+++ b/.github/workflows/latest_deps.yml
@@ -54,7 +54,7 @@ jobs:
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
python-version: "3.x"
- poetry-version: "2.1.1"
+ poetry-version: "2.2.1"
extras: "all"
# Dump installed versions for debugging.
- run: poetry run pip list > before.txt
diff --git a/.github/workflows/push_complement_image.yml b/.github/workflows/push_complement_image.yml
index 12b4720ca5..7793172e55 100644
--- a/.github/workflows/push_complement_image.yml
+++ b/.github/workflows/push_complement_image.yml
@@ -69,6 +69,7 @@ jobs:
run: |
for TAG in ${{ join(fromJson(steps.meta.outputs.json).tags, ' ') }}; do
echo "tag and push $TAG"
- docker tag complement-synapse $TAG
+ # `localhost/complement-synapse` should match the image created by `scripts-dev/complement.sh`
+ docker tag localhost/complement-synapse $TAG
docker push $TAG
done
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 98b47130a1..a03d27472d 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -95,7 +95,7 @@ jobs:
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
python-version: "3.x"
- poetry-version: "2.1.1"
+ poetry-version: "2.2.1"
extras: "all"
- run: poetry run scripts-dev/generate_sample_config.sh --check
- run: poetry run scripts-dev/config-lint.sh
@@ -134,7 +134,7 @@ jobs:
- name: Setup Poetry
uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
- poetry-version: "2.1.1"
+ poetry-version: "2.2.1"
install-project: "false"
- name: Run ruff check
@@ -169,7 +169,7 @@ jobs:
# https://github.com/matrix-org/synapse/pull/15376#issuecomment-1498983775
# To make CI green, err towards caution and install the project.
install-project: "true"
- poetry-version: "2.1.1"
+ poetry-version: "2.2.1"
# Cribbed from
# https://github.com/AustinScola/mypy-cache-github-action/blob/85ea4f2972abed39b33bd02c36e341b28ca59213/src/restore.ts#L10-L17
@@ -265,7 +265,7 @@ jobs:
# Install like a normal project from source with all optional dependencies
extras: all
install-project: "true"
- poetry-version: "2.1.1"
+ poetry-version: "2.2.1"
- name: Ensure `Cargo.lock` is up to date (no stray changes after install)
# The `::error::` syntax is using GitHub Actions' error annotations, see
@@ -398,7 +398,7 @@ jobs:
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
python-version: ${{ matrix.job.python-version }}
- poetry-version: "2.1.1"
+ poetry-version: "2.2.1"
extras: ${{ matrix.job.extras }}
- name: Await PostgreSQL
if: ${{ matrix.job.postgres-version }}
@@ -500,7 +500,7 @@ jobs:
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
python-version: ${{ matrix.python-version }}
- poetry-version: "2.1.1"
+ poetry-version: "2.2.1"
extras: ${{ matrix.extras }}
- run: poetry run trial --jobs=2 tests
- name: Dump logs
@@ -595,7 +595,7 @@ jobs:
- run: sudo apt-get -qq install xmlsec1 postgresql-client
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
- poetry-version: "2.1.1"
+ poetry-version: "2.2.1"
extras: "postgres"
- run: .ci/scripts/test_export_data_command.sh
env:
@@ -648,7 +648,7 @@ jobs:
- uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
with:
python-version: ${{ matrix.python-version }}
- poetry-version: "2.1.1"
+ poetry-version: "2.2.1"
extras: "postgres"
- run: .ci/scripts/test_synapse_port_db.sh
id: run_tester_script
@@ -705,6 +705,13 @@ jobs:
toolchain: ${{ env.RUST_VERSION }}
- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+ # We use `poetry` in `complement.sh`
+ - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0
+ with:
+ poetry-version: "2.2.1"
+ # Matches the `path` where we checkout Synapse above
+ working-directory: "synapse"
+
- name: Prepare Complement's Prerequisites
run: synapse/.ci/scripts/setup_complement_prerequisites.sh
@@ -713,6 +720,32 @@ jobs:
cache-dependency-path: complement/go.sum
go-version-file: complement/go.mod
+ # Run the image sanity check test first as this is the first thing we want to know
+ # about (are we actually testing what we expect?) and we don't want to debug
+ # downstream failures (wild goose chase).
+ - name: Sanity check Complement image
+ id: run_sanity_check_complement_image_test
+ # -p=1: We're using `-p 1` to force the test packages to run serially as GHA boxes
+ # are underpowered and don't like running tons of Synapse instances at once.
+ # -json: Output JSON format so that gotestfmt can parse it.
+ #
+ # tee /tmp/gotest-complement.log: We tee the output to a file so that we can re-process it
+ # later on for better formatting with gotestfmt. But we still want the command
+ # to output to the terminal as it runs so we can see what's happening in
+ # real-time.
+ run: |
+ set -o pipefail
+ COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh --in-repo -p 1 -json -run 'TestSynapseVersion/Synapse_version_matches_current_git_checkout' 2>&1 | tee /tmp/gotest-sanity-check-complement.log
+ shell: bash
+ env:
+ POSTGRES: ${{ (matrix.database == 'Postgres') && 1 || '' }}
+ WORKERS: ${{ (matrix.arrangement == 'workers') && 1 || '' }}
+
+ - name: Formatted sanity check Complement test logs
+ # Always run this step if we attempted to run the Complement tests.
+ if: always() && steps.run_sanity_check_complement_image_test.outcome != 'skipped'
+ run: cat /tmp/gotest-sanity-check-complement.log | gotestfmt -hide "successful-downloads,empty-packages"
+
- name: Run Complement Tests
id: run_complement_tests
# -p=1: We're using `-p 1` to force the test packages to run serially as GHA boxes
diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml
index 12fdbbe7c4..d38e38ebcb 100644
--- a/.github/workflows/twisted_trunk.yml
+++ b/.github/workflows/twisted_trunk.yml
@@ -54,7 +54,7 @@ jobs:
with:
python-version: "3.x"
extras: "all"
- poetry-version: "2.1.1"
+ poetry-version: "2.2.1"
- run: |
poetry remove twisted
poetry add --extras tls git+https://github.com/twisted/twisted.git#${{ inputs.twisted_ref || 'trunk' }}
@@ -82,7 +82,7 @@ jobs:
with:
python-version: "3.x"
extras: "all test"
- poetry-version: "2.1.1"
+ poetry-version: "2.2.1"
- run: |
poetry remove twisted
poetry add --extras tls git+https://github.com/twisted/twisted.git#trunk
diff --git a/CHANGES.md b/CHANGES.md
index 8bcf1a050a..809ba8ae51 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,46 @@
+# Synapse 1.150.0 (2026-03-24)
+
+No significant changes since 1.150.0rc1.
+
+
+
+
+# Synapse 1.150.0rc1 (2026-03-17)
+
+## Features
+
+- Add experimental support for the [MSC4370](https://github.com/matrix-org/matrix-spec-proposals/pull/4370) Federation API `GET /extremities` endpoint. ([\#19314](https://github.com/element-hq/synapse/issues/19314))
+- [MSC4140: Cancellable delayed events](https://github.com/matrix-org/matrix-spec-proposals/pull/4140): When persisting a delayed event to the timeline, include its `delay_id` in the event's `unsigned` section in `/sync` responses to the event sender. ([\#19479](https://github.com/element-hq/synapse/issues/19479))
+- Expose [MSC4354 Sticky Events](https://github.com/matrix-org/matrix-spec-proposals/pull/4354) over the legacy (v3) /sync API. ([\#19487](https://github.com/element-hq/synapse/issues/19487))
+- When Matrix Authentication Service (MAS) integration is enabled, allow MAS to set the user locked status in Synapse. ([\#19554](https://github.com/element-hq/synapse/issues/19554))
+
+## Bugfixes
+
+- Fix `Build and push complement image` CI job pointing to non-existent image. ([\#19523](https://github.com/element-hq/synapse/issues/19523))
+- Fix a bug introduced in v1.26.0 that caused deactivated, erased users to not be removed from the user directory. ([\#19542](https://github.com/element-hq/synapse/issues/19542))
+
+## Improved Documentation
+
+- In the Admin API documentation, always express path parameters as `/<param>` instead of as `/$param`. ([\#19307](https://github.com/element-hq/synapse/issues/19307))
+- Update docs to clarify `outbound_federation_restricted_to` can also be used with the [Secure Border Gateway (SBG)](https://element.io/en/server-suite/secure-border-gateways). ([\#19517](https://github.com/element-hq/synapse/issues/19517))
+- Unify Complement developer docs. ([\#19518](https://github.com/element-hq/synapse/issues/19518))
+
+## Internal Changes
+
+- Put membership updates in a background resumable task when changing the avatar or the display name. ([\#19311](https://github.com/element-hq/synapse/issues/19311))
+- Add in-repo Complement test to sanity check Synapse version matches git checkout (testing what we think we are). ([\#19476](https://github.com/element-hq/synapse/issues/19476))
+- Migrate `dev` dependencies to [PEP 735](https://peps.python.org/pep-0735/) dependency groups. ([\#19490](https://github.com/element-hq/synapse/issues/19490))
+- Remove the optional `systemd-python` dependency and the `systemd` extra on the `synapse` package. ([\#19491](https://github.com/element-hq/synapse/issues/19491))
+- Avoid re-computing the event ID when cloning events. ([\#19527](https://github.com/element-hq/synapse/issues/19527))
+- Allow caching of the `/versions` and `/auth_metadata` public endpoints. ([\#19530](https://github.com/element-hq/synapse/issues/19530))
+- Add a few labels to the number groupings in the `Processed request` logs. ([\#19548](https://github.com/element-hq/synapse/issues/19548))
+
+### Famedly additions for v1.150.0_1
+
+- chore: Prohibit dependencies on included modules from causing unexpected version changes [\#247](https://github.com/famedly/synapse/pull/247)
+
+
+
# Synapse 1.149.1 (2026-03-11)
## Internal Changes
diff --git a/Cargo.lock b/Cargo.lock
index 340114f801..d6945bfbb7 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -13,9 +13,9 @@ dependencies = [
[[package]]
name = "anyhow"
-version = "1.0.101"
+version = "1.0.102"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5f0e0fee31ef5ed1ba1316088939cea399010ed7731dba877ed44aeb407a75ea"
+checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c"
[[package]]
name = "arc-swap"
@@ -844,9 +844,9 @@ dependencies = [
[[package]]
name = "pyo3-log"
-version = "0.13.2"
+version = "0.13.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f8bae9ad5ba08b0b0ed2bb9c2bdbaeccc69cafca96d78cf0fbcea0d45d122bb"
+checksum = "26c2ec80932c5c3b2d4fbc578c9b56b2d4502098587edb8bef5b6bfcad43682e"
dependencies = [
"arc-swap",
"log",
@@ -910,9 +910,9 @@ dependencies = [
[[package]]
name = "quinn-proto"
-version = "0.11.12"
+version = "0.11.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "49df843a9161c85bb8aae55f101bc0bac8bcafd637a620d9122fd7e0b2f7422e"
+checksum = "434b42fec591c96ef50e21e886936e66d3cc3f737104fdb9b737c40ffb94c098"
dependencies = [
"bytes",
"getrandom 0.3.3",
diff --git a/complement/README.md b/complement/README.md
index c4a95bdd63..77f7ee182d 100644
--- a/complement/README.md
+++ b/complement/README.md
@@ -8,8 +8,7 @@ ensure everything works at a holistic level.
## Setup
Nothing beyond a [normal Complement
-setup](https://github.com/matrix-org/complement?tab=readme-ov-file#running) (just Go and
-Docker).
+setup](https://github.com/matrix-org/complement#running) (just Go and Docker).
## Running tests
@@ -28,14 +27,39 @@ scripts-dev/complement.sh ./tests/csapi/... -run TestRoomCreate/Parallel/POST_/c
scripts-dev/complement.sh ./tests/... -run 'TestRoomCreate/Parallel/POST_/createRoom_makes_a_(.*)'
```
-Typically, if you're developing the Synapse and Complement tests side-by-side, you will
-run something like this:
+It's often nice to develop on Synapse and write Complement tests at the same time.
+Here is how to run your local Synapse checkout against your local Complement checkout.
```shell
# To run a specific test
COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh ./tests/csapi/... -run TestRoomCreate
```
+The above will run a monolithic (single-process) Synapse with SQLite as the database.
+For other configurations, try:
+
+- Passing `POSTGRES=1` as an environment variable to use the Postgres database instead.
+- Passing `WORKERS=1` as an environment variable to use a workerised setup instead. This
+ option implies the use of Postgres.
+ - If setting `WORKERS=1`, optionally set `WORKER_TYPES=` to declare which worker types
+ you wish to test. A simple comma-delimited string containing the worker types
+ defined from the `WORKERS_CONFIG` template in
+ [here](https://github.com/element-hq/synapse/blob/develop/docker/configure_workers_and_start.py#L54).
+ A safe example would be `WORKER_TYPES="federation_inbound, federation_sender,
+ synchrotron"`. See the [worker documentation](../workers.md) for additional
+ information on workers.
+- Passing `ASYNCIO_REACTOR=1` as an environment variable to use the asyncio-backed
+ reactor with Twisted instead of the default one.
+- Passing `PODMAN=1` will use the [podman](https://podman.io/) container runtime,
+ instead of docker.
+- Passing `UNIX_SOCKETS=1` will utilise Unix socket functionality for Synapse, Redis,
+ and Postgres (when applicable).
+
+To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g:
+```sh
+SYNAPSE_TEST_LOG_LEVEL=DEBUG COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestRoomCreate
+```
+
### Running in-repo tests
@@ -52,3 +76,20 @@ To run the in-repo Complement tests, use the `--in-repo` command line argument.
# Similarly, you can also use `-run` to specify all or part of a specific test path to run
scripts-dev/complement.sh --in-repo ./tests/... -run TestIntraShardFederation
```
+
+### Access database for homeserver after Complement test runs.
+
+If you're curious what the database looks like after you run some tests, here are some
+steps to get you going in Synapse:
+
+1. In your Complement test comment out `defer deployment.Destroy(t)` and replace with
+ `defer time.Sleep(2 * time.Hour)` to keep the homeserver running after the tests
+ complete
+1. Start the Complement tests
+1. Find the name of the container, `docker ps -f name=complement_` (this will filter for
+ just the Complement related Docker containers)
+1. Access the container replacing the name with what you found in the previous step:
+ `docker exec -it complement_1_hs_with_application_service.hs1_2 /bin/bash`
+1. Install sqlite (database driver), `apt-get update && apt-get install -y sqlite3`
+1. Then run `sqlite3` and open the database `.open /conf/homeserver.db` (this db path
+ comes from the Synapse homeserver.yaml)
diff --git a/complement/go.mod b/complement/go.mod
index c6c1678bac..2a9adb9b95 100644
--- a/complement/go.mod
+++ b/complement/go.mod
@@ -31,6 +31,7 @@ require (
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/matrix-org/gomatrix v0.0.0-20220926102614-ceba4d9f7530 // indirect
github.com/matrix-org/util v0.0.0-20221111132719-399730281e66 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/term v0.5.2 // indirect
diff --git a/complement/go.sum b/complement/go.sum
index c674730c05..79a35aa14c 100644
--- a/complement/go.sum
+++ b/complement/go.sum
@@ -38,6 +38,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
+github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=
+github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
github.com/hashicorp/go-set/v3 v3.0.0 h1:CaJBQvQCOWoftrBcDt7Nwgo0kdpmrKxar/x2o6pV9JA=
github.com/hashicorp/go-set/v3 v3.0.0/go.mod h1:IEghM2MpE5IaNvL+D7X480dfNtxjRXZ6VMpK3C8s2ok=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
@@ -50,6 +52,8 @@ github.com/matrix-org/gomatrixserverlib v0.0.0-20250813150445-9f5070a65744 h1:5G
github.com/matrix-org/gomatrixserverlib v0.0.0-20250813150445-9f5070a65744/go.mod h1:b6KVfDjXjA5Q7vhpOaMqIhFYvu5BuFVZixlNeTV/CLc=
github.com/matrix-org/util v0.0.0-20221111132719-399730281e66 h1:6z4KxomXSIGWqhHcfzExgkH3Z3UkIXry4ibJS4Aqz2Y=
github.com/matrix-org/util v0.0.0-20221111132719-399730281e66/go.mod h1:iBI1foelCqA09JJgPV0FYz4qA5dUXYOxMi57FxKBdd4=
+github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE=
+github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=
@@ -120,6 +124,8 @@ golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
+golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -129,6 +135,8 @@ golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
+golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -147,6 +155,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
+golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -160,6 +170,8 @@ google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3i
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/h2non/gock.v1 v1.1.2 h1:jBbHXgGBK/AoPVfJh5x4r/WxIrElvbLel8TCZkkZJoY=
+gopkg.in/h2non/gock.v1 v1.1.2/go.mod h1:n7UGz/ckNChHiK05rDoiC4MYSunEC/lyaUm2WWaDva0=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/complement/tests/synapse_version_check_test.go b/complement/tests/synapse_version_check_test.go
new file mode 100644
index 0000000000..790a6a40ce
--- /dev/null
+++ b/complement/tests/synapse_version_check_test.go
@@ -0,0 +1,101 @@
+// This file is licensed under the Affero General Public License (AGPL) version 3.
+//
+// Copyright (C) 2026 Element Creations Ltd
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// See the GNU Affero General Public License for more details:
+// <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+package synapse_tests
+
+import (
+ "net/http"
+ "os/exec"
+ "strings"
+ "testing"
+
+ "github.com/matrix-org/complement"
+ "github.com/matrix-org/complement/match"
+ "github.com/matrix-org/complement/must"
+ "github.com/tidwall/gjson"
+)
+
+func TestSynapseVersion(t *testing.T) {
+ deployment := complement.Deploy(t, 1)
+ defer deployment.Destroy(t)
+
+ unauthedClient := deployment.UnauthenticatedClient(t, "hs1")
+
+ // Sanity check that the version of Synapse used in the `COMPLEMENT_BASE_IMAGE`
+ // matches the same git commit we have checked out. This ensures that the image being
+ // used in Complement is the one that we just built locally with `complement.sh`
+ // instead of accidentally pulling in some remote one.
+ //
+ // This test is expected to pass if you use `complement.sh`.
+ //
+ // If this test fails, it probably means that Complement is using an image that
+ // doesn't encompass the changes you have checked out (unexpected). We want to yell
+ // loudly and point out what's wrong instead of silently letting your PRs pass
+ // without actually being tested.
+ t.Run("Synapse version matches current git checkout", func(t *testing.T) {
+ // Get the Synapse version details of the current git checkout
+ checkoutSynapseVersion := runCommand(
+ t,
+ []string{
+ "poetry",
+ "run",
+ "python",
+ "-c",
+ "from synapse.util import SYNAPSE_VERSION; print(SYNAPSE_VERSION)",
+ },
+ )
+
+ // Find the version details of the Synapse instance deployed from the Docker image
+ res := unauthedClient.MustDo(t, "GET", []string{"_matrix", "federation", "v1", "version"})
+ body := must.MatchResponse(t, res, match.HTTPResponse{
+ StatusCode: http.StatusOK,
+ JSON: []match.JSON{
+ match.JSONKeyPresent("server"),
+ },
+ })
+ rawSynapseVersionString := gjson.GetBytes(body, "server.version").Str
+ t.Logf(
+ "Synapse version string from federation version endpoint: %s",
+ rawSynapseVersionString,
+ )
+
+ must.Equal(
+ t,
+ rawSynapseVersionString,
+ checkoutSynapseVersion,
+ "Synapse version in the checkout doesn't match the Synapse version that Complement is running. "+
+ "If this test fails, it probably means that Complement is using an image that "+
+ "doesn't encompass the changes you have checked out (unexpected). We want to yell "+
+ "loudly and point out what's wrong instead of silently letting your PRs pass "+
+ "without actually being tested.",
+ )
+ })
+}
+
+// runCommand will run the given command and return the stdout (whitespace
+// trimmed).
+func runCommand(t *testing.T, commandPieces []string) string {
+ t.Helper()
+
+ // Then run our actual command
+ cmd := exec.Command(commandPieces[0], commandPieces[1:]...)
+ output, err := cmd.Output()
+ if err != nil {
+ t.Fatalf(
+ "runCommand: failed to run command (%s): %v",
+ strings.Join(commandPieces, " "),
+ err,
+ )
+ }
+
+ return strings.TrimSpace(string(output))
+}
diff --git a/contrib/systemd/README.md b/contrib/systemd/README.md
index 3c2cce74fa..ccdbe16388 100644
--- a/contrib/systemd/README.md
+++ b/contrib/systemd/README.md
@@ -16,3 +16,9 @@ appropriate locations of your installation.
5. Start Synapse: `sudo systemctl start matrix-synapse`
6. Verify Synapse is running: `sudo systemctl status matrix-synapse`
7. *optional* Enable Synapse to start at system boot: `sudo systemctl enable matrix-synapse`
+
+## Logging
+
+If you use `contrib/systemd/log_config.yaml`, install `systemd-python` in the
+same Python environment as Synapse. The config uses
+`systemd.journal.JournalHandler`, which requires that package.
diff --git a/contrib/systemd/log_config.yaml b/contrib/systemd/log_config.yaml
index 22f67a50ce..4c29787647 100644
--- a/contrib/systemd/log_config.yaml
+++ b/contrib/systemd/log_config.yaml
@@ -1,5 +1,6 @@
version: 1
+# Requires the `systemd-python` package in Synapse's runtime environment.
# In systemd's journal, loglevel is implicitly stored, so let's omit it
# from the message text.
formatters:
diff --git a/debian/build_virtualenv b/debian/build_virtualenv
index 9e7fb95c8e..7bbf52ddd9 100755
--- a/debian/build_virtualenv
+++ b/debian/build_virtualenv
@@ -35,12 +35,14 @@ TEMP_VENV="$(mktemp -d)"
python3 -m venv "$TEMP_VENV"
source "$TEMP_VENV/bin/activate"
pip install -U pip
-pip install poetry==2.1.1 poetry-plugin-export==1.9.0
+pip install poetry==2.2.1 poetry-plugin-export==1.9.0
poetry export \
--extras all \
--extras test \
- --extras systemd \
-o exported_requirements.txt
+# Keep journald logging support in the packaged virtualenv when using
+# systemd.journal.JournalHandler in /etc/matrix-synapse/log.yaml.
+echo "systemd-python==235 --hash=sha256:4e57f39797fd5d9e2d22b8806a252d7c0106c936039d1e71c8c6b8008e695c0a" >> exported_requirements.txt
deactivate
rm -rf "$TEMP_VENV"
@@ -57,7 +59,7 @@ dh_virtualenv \
--extra-pip-arg="--no-deps" \
--extra-pip-arg="--no-cache-dir" \
--extra-pip-arg="--compile" \
- --extras="all,systemd,test" \
+ --extras="all,test" \
--requirements="exported_requirements.txt"
PACKAGE_BUILD_DIR="$(pwd)/debian/matrix-synapse-py3"
diff --git a/debian/changelog b/debian/changelog
index b0bc62684c..c8146764de 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,20 @@
+matrix-synapse-py3 (1.150.0) stable; urgency=medium
+
+ * New synapse release 1.150.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 24 Mar 2026 14:17:04 +0000
+
+matrix-synapse-py3 (1.150.0~rc1) stable; urgency=medium
+
+ [ Quentin Gliech ]
+ * Change how the systemd journald integration is installed.
+ * Update Poetry used at build time to 2.2.1.
+
+ [ Synapse Packaging team ]
+ * New synapse release 1.150.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 17 Mar 2026 14:56:35 +0000
+
matrix-synapse-py3 (1.149.1) stable; urgency=medium
* New synapse release 1.149.1.
diff --git a/docker/Dockerfile b/docker/Dockerfile
index e119dbaf0e..b3202a1ec7 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -22,7 +22,7 @@
ARG DEBIAN_VERSION=trixie
ARG PYTHON_VERSION=3.13
-ARG POETRY_VERSION=2.1.1
+ARG POETRY_VERSION=2.2.1
###
### Stage 0: generate requirements.txt
@@ -170,6 +170,14 @@ FROM docker.io/library/python:${PYTHON_VERSION}-slim-${DEBIAN_VERSION}
ARG TARGETARCH
+# If specified, Synapse will use this as the version string in the app.
+#
+# This can be useful to capture the git info of the build as `.git/` won't be
+# available in the Docker image for Synapse to generate from.
+ARG SYNAPSE_VERSION_STRING
+# Pass it through to Synapse as an environment variable.
+ENV SYNAPSE_VERSION_STRING=${SYNAPSE_VERSION_STRING}
+
LABEL org.opencontainers.image.url='https://github.com/famedly/synapse'
LABEL org.opencontainers.image.documentation='https://github.com/famedly/synapse/blob/master/docker/README.md'
LABEL org.opencontainers.image.source='https://github.com/famedly/synapse.git'
diff --git a/docker/README-testing.md b/docker/README-testing.md
index bad66a65db..a230aab562 100644
--- a/docker/README-testing.md
+++ b/docker/README-testing.md
@@ -12,11 +12,11 @@ Note that running Synapse's unit tests from within the docker image is not suppo
`scripts-dev/complement.sh` is a script that will automatically build
and run Synapse against Complement.
-Consult the [contributing guide][guideComplementSh] for instructions on how to use it.
-
[guideComplementSh]: https://famedly.github.io/synapse/latest/development/contributing_guide.html#run-the-integration-tests-complement
+Consult [Element's Complement docs](https://github.com/element-hq/synapse/tree/develop/complement) for instructions on how to use it.
+
## Building and running the images manually
Under some circumstances, you may wish to build the images manually.
diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md
index 9e0a1cb70c..14f86fa976 100644
--- a/docs/admin_api/user_admin_api.md
+++ b/docs/admin_api/user_admin_api.md
@@ -403,6 +403,7 @@ is set to `true`:
- Remove the user's display name
- Remove the user's avatar URL
+- Remove the user's custom profile fields
- Mark the user as erased
The following actions are **NOT** performed. The list may be incomplete.
@@ -599,7 +600,7 @@ Fetches the number of invites sent by the provided user ID across all rooms
after the given timestamp.
```
-GET /_synapse/admin/v1/users/$user_id/sent_invite_count
+GET /_synapse/admin/v1/users/<user_id>/sent_invite_count
```
**Parameters**
@@ -633,7 +634,7 @@ Fetches the number of rooms that the user joined after the given timestamp, even
if they have subsequently left/been banned from those rooms.
```
-GET /_synapse/admin/v1/users/$/cumulative_joined_room_count
+GET /_synapse/admin/v1/users/<user_id>/cumulative_joined_room_count
```
**Parameters**
@@ -1438,7 +1439,7 @@ The request and response format is the same as the
The API is:
```
-GET /_synapse/admin/v1/auth_providers/$provider/users/$external_id
+GET /_synapse/admin/v1/auth_providers/<provider>/users/<external_id>
```
When a user matched the given ID for the given provider, an HTTP code `200` with a response body like the following is returned:
@@ -1477,7 +1478,7 @@ _Added in Synapse 1.68.0._
The API is:
```
-GET /_synapse/admin/v1/threepid/$medium/users/$address
+GET /_synapse/admin/v1/threepid/<medium>/users/<address>
```
When a user matched the given address for the given medium, an HTTP code `200` with a response body like the following is returned:
@@ -1521,7 +1522,7 @@ is provided to override the default and allow the admin to issue the redactions
The API is
```
-POST /_synapse/admin/v1/user/$user_id/redact
+POST /_synapse/admin/v1/user/<user_id>/redact
{
"rooms": ["!roomid1", "!roomid2"]
@@ -1570,7 +1571,7 @@ or until Synapse is restarted (whichever happens first).
The API is:
```
-GET /_synapse/admin/v1/user/redact_status/$redact_id
+GET /_synapse/admin/v1/user/redact_status/<redact_id>
```
A response body like the following is returned:
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md
index c8c04c3e02..3fc3b9fd22 100644
--- a/docs/development/contributing_guide.md
+++ b/docs/development/contributing_guide.md
@@ -330,46 +330,8 @@ For more details about other configurations, see the [Docker-specific documentat
## Run the integration tests ([Complement](https://github.com/famedly/complement)).
-[Complement](https://github.com/famedly/complement) is a suite of black box tests that can be run on any homeserver implementation. It can also be thought of as end-to-end (e2e) tests. This is our own fork of complement, the upstream repository owned by matrix-org is [here](https://github.com/matrix-org/complement)
-
-It's often nice to develop on Synapse and write Complement tests at the same time.
-Here is how to run your local Synapse checkout against your local Complement checkout.
-
-(checkout [`complement`](https://github.com/matrix-org/complement) alongside your `synapse` checkout)
-```sh
-COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh
-```
-
-To run a specific test file, you can pass the test name at the end of the command. The name passed comes from the naming structure in your Complement tests. If you're unsure of the name, you can do a full run and copy it from the test output:
-
-```sh
-COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages
-```
-
-To run a specific test, you can specify the whole name structure:
-
-```sh
-COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages/parallel/Historical_events_resolve_in_the_correct_order
-```
-
-The above will run a monolithic (single-process) Synapse with SQLite as the database. For other configurations, try:
-
-- Passing `POSTGRES=1` as an environment variable to use the Postgres database instead.
-- Passing `WORKERS=1` as an environment variable to use a workerised setup instead. This option implies the use of Postgres.
- - If setting `WORKERS=1`, optionally set `WORKER_TYPES=` to declare which worker
- types you wish to test. A simple comma-delimited string containing the worker types
- defined from the `WORKERS_CONFIG` template in
- [here](https://github.com/famedly/synapse/blob/master/docker/configure_workers_and_start.py#L54).
- A safe example would be `WORKER_TYPES="federation_inbound, federation_sender, synchrotron"`.
- See the [worker documentation](../workers.md) for additional information on workers.
-- Passing `ASYNCIO_REACTOR=1` as an environment variable to use the Twisted asyncio reactor instead of the default one.
-- Passing `PODMAN=1` will use the [podman](https://podman.io/) container runtime, instead of docker.
-- Passing `UNIX_SOCKETS=1` will utilise Unix socket functionality for Synapse, Redis, and Postgres(when applicable).
-
-To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g:
-```sh
-SYNAPSE_TEST_LOG_LEVEL=DEBUG COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages
-```
+See the [Complement docs](https://github.com/famedly/synapse/tree/develop/complement)
+for how to use the `./scripts-dev/complement.sh` test runner script.
### Prettier formatting with `gotestfmt`
@@ -385,18 +347,6 @@ COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -json | gotestfmt -hide
(Remove `-hide successful-tests` if you don't want to hide successful tests.)
-### Access database for homeserver after Complement test runs.
-
-If you're curious what the database looks like after you run some tests, here are some steps to get you going in Synapse:
-
-1. In your Complement test comment out `defer deployment.Destroy(t)` and replace with `defer time.Sleep(2 * time.Hour)` to keep the homeserver running after the tests complete
-1. Start the Complement tests
-1. Find the name of the container, `docker ps -f name=complement_` (this will filter for just the Compelement related Docker containers)
-1. Access the container replacing the name with what you found in the previous step: `docker exec -it complement_1_hs_with_application_service.hs1_2 /bin/bash`
-1. Install sqlite (database driver), `apt-get update && apt-get install -y sqlite3`
-1. Then run `sqlite3` and open the database `.open /conf/homeserver.db` (this db path comes from the Synapse homeserver.yaml)
-
-
# 9. Submit your patch.
Once you're happy with your patch, it's time to prepare a Pull Request.
diff --git a/docs/development/dependencies.md b/docs/development/dependencies.md
index 1b3348703f..fe0667194a 100644
--- a/docs/development/dependencies.md
+++ b/docs/development/dependencies.md
@@ -6,7 +6,7 @@ This is a quick cheat sheet for developers on how to use [`poetry`](https://pyth
See the [contributing guide](contributing_guide.md#4-install-the-dependencies).
-Developers should use Poetry 1.3.2 or higher. If you encounter problems related
+Developers should use Poetry 2.2.0 or higher. If you encounter problems related
to poetry, please [double-check your poetry version](#check-the-version-of-poetry-with-poetry---version).
# Background
diff --git a/docs/setup/installation.md b/docs/setup/installation.md
index 54a326ddbb..1a27fb6023 100644
--- a/docs/setup/installation.md
+++ b/docs/setup/installation.md
@@ -228,6 +228,11 @@ pip install --upgrade setuptools
pip install matrix-synapse
```
+If you want to use a logging configuration that references
+`systemd.journal.JournalHandler` (for example `contrib/systemd/log_config.yaml`),
+you must install `systemd-python` separately in the same environment.
+Synapse no longer provides a `matrix-synapse[systemd]` extra.
+
This will download Synapse from [PyPI](https://pypi.org/project/matrix-synapse)
and install it, along with the python libraries it uses, into a virtual environment
under `~/synapse/env`. Feel free to pick a different directory if you
diff --git a/docs/upgrade.md b/docs/upgrade.md
index 5eed36555a..0833ae5a3c 100644
--- a/docs/upgrade.md
+++ b/docs/upgrade.md
@@ -117,6 +117,43 @@ each upgrade are complete before moving on to the next upgrade, to avoid
stacking them up. You can monitor the currently running background updates with
[the Admin API](usage/administration/admin_api/background_updates.html#status).
+# Upgrading to v1.150.0
+
+## Removal of the `systemd` pip extra
+
+The `matrix-synapse[systemd]` pip extra has been removed.
+If you use `systemd.journal.JournalHandler` in your logging configuration
+(e.g. `contrib/systemd/log_config.yaml`), you must now install
+`systemd-python` manually in Synapse's runtime environment:
+
+```bash
+pip install systemd-python
+```
+
+No action is needed if you do not use journal logging, or if you installed
+Synapse from the Debian packages (which handle this automatically).
+
+## Module API: Deprecation of the `deactivation` parameter in the `set_displayname` method
+
+If you have Synapse modules installed that use the `set_displayname` method to change
+the display name of your users, please ensure that it doesn't pass the optional
+`deactivation` parameter.
+
+This parameter is now deprecated and it is intended to be removed in 2027.
+No immediate change is necessary, however once the parameter is removed, modules passing it will produce errors.
+[Issue #19546](https://github.com/element-hq/synapse/issues/19546) tracks this removal.
+
+From this version, when the parameter is passed, an error such as
+``Deprecated `deactivation` parameter passed to `set_displayname` Module API (value: False). This will break in 2027.`` will be logged. The method will otherwise continue to work.
+
+## Updated request log format (`Processed request: ...`)
+
+The [request log format](usage/administration/request_log.md) has slightly changed to
+include `ru=(...)` and `db=(...)` labels to better disambiguate the number groupings.
+Previously, these values appeared without labels.
+
+This only matters if you have third-party tooling that parses the Synapse logs.
+
# Upgrading to v1.146.0
## Drop support for Ubuntu 25.04 Plucky Puffin, and add support for 25.10 Questing Quokka
diff --git a/docs/usage/administration/request_log.md b/docs/usage/administration/request_log.md
index 2cf5d72e3d..5079a5bd8f 100644
--- a/docs/usage/administration/request_log.md
+++ b/docs/usage/administration/request_log.md
@@ -5,8 +5,8 @@ HTTP request logs are written by synapse (see [`synapse/http/site.py`](https://g
See the following for how to decode the dense data available from the default logging configuration.
```
-2020-10-01 12:00:00,000 - synapse.access.http.8008 - 311 - INFO - PUT-1000- 192.168.0.1 - 8008 - {another-matrix-server.com} Processed request: 0.100sec/-0.000sec (0.000sec, 0.000sec) (0.001sec/0.090sec/3) 11B !200 "PUT /_matrix/federation/v1/send/1600000000000 HTTP/1.1" "Synapse/1.20.1" [0 dbevts]
--AAAAAAAAAAAAAAAAAAAAA- -BBBBBBBBBBBBBBBBBBBBBB- -C- -DD- -EEEEEE- -FFFFFFFFF- -GG- -HHHHHHHHHHHHHHHHHHHHHHH- -IIIIII- -JJJJJJJ- -KKKKKK-, -LLLLLL- -MMMMMMM- -NNNNNN- O -P- -QQ- -RRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR- -SSSSSSSSSSSS- -TTTTTT-
+2020-10-01 12:00:00,000 - synapse.access.http.8008 - 311 - INFO - PUT-1000- 192.168.0.1 - 8008 - {another-matrix-server.com} Processed request: 0.100sec/-0.000sec ru=(0.000sec, 0.000sec) db=(0.001sec/0.090sec/3) 11B !200 "PUT /_matrix/federation/v1/send/1600000000000 HTTP/1.1" "Synapse/1.20.1" [0 dbevts]
+-AAAAAAAAAAAAAAAAAAAAA- -BBBBBBBBBBBBBBBBBBBBBB- -C- -DD- -EEEEEE- -FFFFFFFFF- -GG- -HHHHHHHHHHHHHHHHHHHHHHH- -IIIIII- -JJJJJJJ- -KKKKKK-, -LLLLLL- -MMMMMM- -NNNNNN- O -P- -QQ- -RRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRRR- -SSSSSSSSSSSS- -TTTTTT-
```
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index 50be84d68b..085e43a842 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -4518,7 +4518,7 @@ stream_writers:
---
### `outbound_federation_restricted_to`
-*(array)* When using workers, you can restrict outbound federation traffic to only go through a specific subset of workers. Any worker specified here must also be in the [`instance_map`](#instance_map). [`worker_replication_secret`](#worker_replication_secret) must also be configured to authorize inter-worker communication.
+*(array)* You can restrict outbound federation traffic to only go through a specific subset of workers including the [Secure Border Gateway (SBG)](https://element.io/en/server-suite/secure-border-gateways). Any worker specified here (including the SBG) must also be in the [`instance_map`](#instance_map). [`worker_replication_secret`](#worker_replication_secret) must also be configured to authorize inter-worker communication.
Also see the [worker documentation](../../workers.md#restrict-outbound-federation-traffic-to-a-specific-set-of-workers) for more info.
diff --git a/docs/usage/configuration/logging_sample_config.md b/docs/usage/configuration/logging_sample_config.md
index 23a55abdcc..9cc96df73c 100644
--- a/docs/usage/configuration/logging_sample_config.md
+++ b/docs/usage/configuration/logging_sample_config.md
@@ -14,6 +14,9 @@ It should be named `.log.config` by default.
Hint: If you're looking for a guide on what each of the fields in the "Processed request" log lines mean,
see [Request log format](../administration/request_log.md).
+If you use `systemd.journal.JournalHandler` in your own logging config, ensure
+`systemd-python` is installed in Synapse's runtime environment.
+
```yaml
{{#include ../../sample_log_config.yaml}}
```
diff --git a/mypy.ini b/mypy.ini
index 216f054431..7a76be6c7d 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -69,7 +69,7 @@ warn_unused_ignores = False
;; https://github.com/python/typeshed/tree/master/stubs
;; and for each package `foo` there's a corresponding `types-foo` package on PyPI,
;; which we can pull in as a dev dependency by adding to `pyproject.toml`'s
-;; `[tool.poetry.group.dev.dependencies]` list.
+;; `[dependency-groups]` `dev` list.
# https://github.com/lepture/authlib/issues/460
[mypy-authlib.*]
diff --git a/poetry.lock b/poetry.lock
index 8a19f62ca3..42910348cc 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -619,71 +619,6 @@ protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4
[package.extras]
grpc = ["grpcio (>=1.44.0,<2.0.0)"]
-[[package]]
-name = "grpcio"
-version = "1.71.0"
-description = "HTTP/2-based RPC framework"
-optional = true
-python-versions = ">=3.9"
-groups = ["main"]
-markers = "python_version <= \"3.13\" and (extra == \"opentracing-otlp\" or extra == \"opentelemetry-log-handler\" or extra == \"all\")"
-files = [
- {file = "grpcio-1.71.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:c200cb6f2393468142eb50ab19613229dcc7829b5ccee8b658a36005f6669fdd"},
- {file = "grpcio-1.71.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:b2266862c5ad664a380fbbcdbdb8289d71464c42a8c29053820ee78ba0119e5d"},
- {file = "grpcio-1.71.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:0ab8b2864396663a5b0b0d6d79495657ae85fa37dcb6498a2669d067c65c11ea"},
- {file = "grpcio-1.71.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c30f393f9d5ff00a71bb56de4aa75b8fe91b161aeb61d39528db6b768d7eac69"},
- {file = "grpcio-1.71.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f250ff44843d9a0615e350c77f890082102a0318d66a99540f54769c8766ab73"},
- {file = "grpcio-1.71.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e6d8de076528f7c43a2f576bc311799f89d795aa6c9b637377cc2b1616473804"},
- {file = "grpcio-1.71.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:9b91879d6da1605811ebc60d21ab6a7e4bae6c35f6b63a061d61eb818c8168f6"},
- {file = "grpcio-1.71.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f71574afdf944e6652203cd1badcda195b2a27d9c83e6d88dc1ce3cfb73b31a5"},
- {file = "grpcio-1.71.0-cp310-cp310-win32.whl", hash = "sha256:8997d6785e93308f277884ee6899ba63baafa0dfb4729748200fcc537858a509"},
- {file = "grpcio-1.71.0-cp310-cp310-win_amd64.whl", hash = "sha256:7d6ac9481d9d0d129224f6d5934d5832c4b1cddb96b59e7eba8416868909786a"},
- {file = "grpcio-1.71.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:d6aa986318c36508dc1d5001a3ff169a15b99b9f96ef5e98e13522c506b37eef"},
- {file = "grpcio-1.71.0-cp311-cp311-macosx_10_14_universal2.whl", hash = "sha256:d2c170247315f2d7e5798a22358e982ad6eeb68fa20cf7a820bb74c11f0736e7"},
- {file = "grpcio-1.71.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:e6f83a583ed0a5b08c5bc7a3fe860bb3c2eac1f03f1f63e0bc2091325605d2b7"},
- {file = "grpcio-1.71.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4be74ddeeb92cc87190e0e376dbc8fc7736dbb6d3d454f2fa1f5be1dee26b9d7"},
- {file = "grpcio-1.71.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4dd0dfbe4d5eb1fcfec9490ca13f82b089a309dc3678e2edabc144051270a66e"},
- {file = "grpcio-1.71.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a2242d6950dc892afdf9e951ed7ff89473aaf744b7d5727ad56bdaace363722b"},
- {file = "grpcio-1.71.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:0fa05ee31a20456b13ae49ad2e5d585265f71dd19fbd9ef983c28f926d45d0a7"},
- {file = "grpcio-1.71.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3d081e859fb1ebe176de33fc3adb26c7d46b8812f906042705346b314bde32c3"},
- {file = "grpcio-1.71.0-cp311-cp311-win32.whl", hash = "sha256:d6de81c9c00c8a23047136b11794b3584cdc1460ed7cbc10eada50614baa1444"},
- {file = "grpcio-1.71.0-cp311-cp311-win_amd64.whl", hash = "sha256:24e867651fc67717b6f896d5f0cac0ec863a8b5fb7d6441c2ab428f52c651c6b"},
- {file = "grpcio-1.71.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:0ff35c8d807c1c7531d3002be03221ff9ae15712b53ab46e2a0b4bb271f38537"},
- {file = "grpcio-1.71.0-cp312-cp312-macosx_10_14_universal2.whl", hash = "sha256:b78a99cd1ece4be92ab7c07765a0b038194ded2e0a26fd654591ee136088d8d7"},
- {file = "grpcio-1.71.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:dc1a1231ed23caac1de9f943d031f1bc38d0f69d2a3b243ea0d664fc1fbd7fec"},
- {file = "grpcio-1.71.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e6beeea5566092c5e3c4896c6d1d307fb46b1d4bdf3e70c8340b190a69198594"},
- {file = "grpcio-1.71.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5170929109450a2c031cfe87d6716f2fae39695ad5335d9106ae88cc32dc84c"},
- {file = "grpcio-1.71.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:5b08d03ace7aca7b2fadd4baf291139b4a5f058805a8327bfe9aece7253b6d67"},
- {file = "grpcio-1.71.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:f903017db76bf9cc2b2d8bdd37bf04b505bbccad6be8a81e1542206875d0e9db"},
- {file = "grpcio-1.71.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:469f42a0b410883185eab4689060a20488a1a0a00f8bbb3cbc1061197b4c5a79"},
- {file = "grpcio-1.71.0-cp312-cp312-win32.whl", hash = "sha256:ad9f30838550695b5eb302add33f21f7301b882937460dd24f24b3cc5a95067a"},
- {file = "grpcio-1.71.0-cp312-cp312-win_amd64.whl", hash = "sha256:652350609332de6dac4ece254e5d7e1ff834e203d6afb769601f286886f6f3a8"},
- {file = "grpcio-1.71.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:cebc1b34ba40a312ab480ccdb396ff3c529377a2fce72c45a741f7215bfe8379"},
- {file = "grpcio-1.71.0-cp313-cp313-macosx_10_14_universal2.whl", hash = "sha256:85da336e3649a3d2171e82f696b5cad2c6231fdd5bad52616476235681bee5b3"},
- {file = "grpcio-1.71.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:f9a412f55bb6e8f3bb000e020dbc1e709627dcb3a56f6431fa7076b4c1aab0db"},
- {file = "grpcio-1.71.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:47be9584729534660416f6d2a3108aaeac1122f6b5bdbf9fd823e11fe6fbaa29"},
- {file = "grpcio-1.71.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c9c80ac6091c916db81131d50926a93ab162a7e97e4428ffc186b6e80d6dda4"},
- {file = "grpcio-1.71.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:789d5e2a3a15419374b7b45cd680b1e83bbc1e52b9086e49308e2c0b5bbae6e3"},
- {file = "grpcio-1.71.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:1be857615e26a86d7363e8a163fade914595c81fec962b3d514a4b1e8760467b"},
- {file = "grpcio-1.71.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:a76d39b5fafd79ed604c4be0a869ec3581a172a707e2a8d7a4858cb05a5a7637"},
- {file = "grpcio-1.71.0-cp313-cp313-win32.whl", hash = "sha256:74258dce215cb1995083daa17b379a1a5a87d275387b7ffe137f1d5131e2cfbb"},
- {file = "grpcio-1.71.0-cp313-cp313-win_amd64.whl", hash = "sha256:22c3bc8d488c039a199f7a003a38cb7635db6656fa96437a8accde8322ce2366"},
- {file = "grpcio-1.71.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:c6a0a28450c16809f94e0b5bfe52cabff63e7e4b97b44123ebf77f448534d07d"},
- {file = "grpcio-1.71.0-cp39-cp39-macosx_10_14_universal2.whl", hash = "sha256:a371e6b6a5379d3692cc4ea1cb92754d2a47bdddeee755d3203d1f84ae08e03e"},
- {file = "grpcio-1.71.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:39983a9245d37394fd59de71e88c4b295eb510a3555e0a847d9965088cdbd033"},
- {file = "grpcio-1.71.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9182e0063112e55e74ee7584769ec5a0b4f18252c35787f48738627e23a62b97"},
- {file = "grpcio-1.71.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693bc706c031aeb848849b9d1c6b63ae6bcc64057984bb91a542332b75aa4c3d"},
- {file = "grpcio-1.71.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:20e8f653abd5ec606be69540f57289274c9ca503ed38388481e98fa396ed0b41"},
- {file = "grpcio-1.71.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:8700a2a57771cc43ea295296330daaddc0d93c088f0a35cc969292b6db959bf3"},
- {file = "grpcio-1.71.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d35a95f05a8a2cbe8e02be137740138b3b2ea5f80bd004444e4f9a1ffc511e32"},
- {file = "grpcio-1.71.0-cp39-cp39-win32.whl", hash = "sha256:f9c30c464cb2ddfbc2ddf9400287701270fdc0f14be5f08a1e3939f1e749b455"},
- {file = "grpcio-1.71.0-cp39-cp39-win_amd64.whl", hash = "sha256:63e41b91032f298b3e973b3fa4093cbbc620c875e2da7b93e249d4728b54559a"},
- {file = "grpcio-1.71.0.tar.gz", hash = "sha256:2b85f7820475ad3edec209d3d89a7909ada16caab05d3f2e08a7e8ae3200a55c"},
-]
-
-[package.extras]
-protobuf = ["grpcio-tools (>=1.71.0)"]
-
[[package]]
name = "grpcio"
version = "1.78.0"
@@ -691,7 +626,7 @@ description = "HTTP/2-based RPC framework"
optional = true
python-versions = ">=3.9"
groups = ["main"]
-markers = "python_version >= \"3.14\" and (extra == \"opentracing-otlp\" or extra == \"opentelemetry-log-handler\" or extra == \"all\")"
+markers = "extra == \"opentracing-otlp\" or extra == \"opentelemetry-log-handler\" or extra == \"all\""
files = [
{file = "grpcio-1.78.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:7cc47943d524ee0096f973e1081cb8f4f17a4615f2116882a5f1416e4cfe92b5"},
{file = "grpcio-1.78.0-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:c3f293fdc675ccba4db5a561048cca627b5e7bd1c8a6973ffedabe7d116e22e2"},
@@ -2665,14 +2600,14 @@ windows-terminal = ["colorama (>=0.4.6)"]
[[package]]
name = "pyjwt"
-version = "2.11.0"
+version = "2.12.0"
description = "JSON Web Token implementation in Python"
optional = false
python-versions = ">=3.9"
groups = ["dev"]
files = [
- {file = "pyjwt-2.11.0-py3-none-any.whl", hash = "sha256:94a6bde30eb5c8e04fee991062b534071fd1439ef58d2adc9ccb823e7bcd0469"},
- {file = "pyjwt-2.11.0.tar.gz", hash = "sha256:35f95c1f0fbe5d5ba6e43f00271c275f7a1a4db1dab27bf708073b75318ea623"},
+ {file = "pyjwt-2.12.0-py3-none-any.whl", hash = "sha256:9bb459d1bdd0387967d287f5656bf7ec2b9a26645d1961628cda1764e087fd6e"},
+ {file = "pyjwt-2.12.0.tar.gz", hash = "sha256:2f62390b667cd8257de560b850bb5a883102a388829274147f1d724453f8fb02"},
]
[package.dependencies]
@@ -2757,18 +2692,18 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=7.4.0)", "pytest-cov (>=2.10.1)", "
[[package]]
name = "pyopenssl"
-version = "25.3.0"
+version = "26.0.0"
description = "Python wrapper module around the OpenSSL library"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
groups = ["main"]
files = [
- {file = "pyopenssl-25.3.0-py3-none-any.whl", hash = "sha256:1fda6fc034d5e3d179d39e59c1895c9faeaf40a79de5fc4cbbfbe0d36f4a77b6"},
- {file = "pyopenssl-25.3.0.tar.gz", hash = "sha256:c981cb0a3fd84e8602d7afc209522773b94c1c2446a3c710a75b06fe1beae329"},
+ {file = "pyopenssl-26.0.0-py3-none-any.whl", hash = "sha256:df94d28498848b98cc1c0ffb8ef1e71e40210d3b0a8064c9d29571ed2904bf81"},
+ {file = "pyopenssl-26.0.0.tar.gz", hash = "sha256:f293934e52936f2e3413b89c6ce36df66a0b34ae1ea3a053b8c5020ff2f513fc"},
]
[package.dependencies]
-cryptography = ">=45.0.7,<47"
+cryptography = ">=46.0.0,<47"
typing-extensions = {version = ">=4.9", markers = "python_version < \"3.13\" and python_version >= \"3.8\""}
[package.extras]
@@ -3488,18 +3423,6 @@ c = ["sqlglotc"]
dev = ["duckdb (>=0.6)", "mypy", "pandas", "pandas-stubs", "pdoc", "pre-commit", "pyperf", "python-dateutil", "pytz", "ruff (==0.7.2)", "types-python-dateutil", "types-pytz", "typing_extensions"]
rs = ["sqlglotrs (==0.13.0)"]
-[[package]]
-name = "systemd-python"
-version = "235"
-description = "Python interface for libsystemd"
-optional = true
-python-versions = "*"
-groups = ["main"]
-markers = "extra == \"systemd\""
-files = [
- {file = "systemd-python-235.tar.gz", hash = "sha256:4e57f39797fd5d9e2d22b8806a252d7c0106c936039d1e71c8c6b8008e695c0a"},
-]
-
[[package]]
name = "threadloop"
version = "1.0.2"
@@ -3593,25 +3516,23 @@ markers = {main = "python_version < \"3.11\""}
[[package]]
name = "tornado"
-version = "6.5.4"
+version = "6.5.5"
description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed."
optional = true
python-versions = ">=3.9"
groups = ["main"]
markers = "extra == \"opentracing-jaeger\" or extra == \"all\""
files = [
- {file = "tornado-6.5.4-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d6241c1a16b1c9e4cc28148b1cda97dd1c6cb4fb7068ac1bedc610768dff0ba9"},
- {file = "tornado-6.5.4-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:2d50f63dda1d2cac3ae1fa23d254e16b5e38153758470e9956cbc3d813d40843"},
- {file = "tornado-6.5.4-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1cf66105dc6acb5af613c054955b8137e34a03698aa53272dbda4afe252be17"},
- {file = "tornado-6.5.4-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50ff0a58b0dc97939d29da29cd624da010e7f804746621c78d14b80238669335"},
- {file = "tornado-6.5.4-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5fb5e04efa54cf0baabdd10061eb4148e0be137166146fff835745f59ab9f7f"},
- {file = "tornado-6.5.4-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9c86b1643b33a4cd415f8d0fe53045f913bf07b4a3ef646b735a6a86047dda84"},
- {file = "tornado-6.5.4-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:6eb82872335a53dd063a4f10917b3efd28270b56a33db69009606a0312660a6f"},
- {file = "tornado-6.5.4-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6076d5dda368c9328ff41ab5d9dd3608e695e8225d1cd0fd1e006f05da3635a8"},
- {file = "tornado-6.5.4-cp39-abi3-win32.whl", hash = "sha256:1768110f2411d5cd281bac0a090f707223ce77fd110424361092859e089b38d1"},
- {file = "tornado-6.5.4-cp39-abi3-win_amd64.whl", hash = "sha256:fa07d31e0cd85c60713f2b995da613588aa03e1303d75705dca6af8babc18ddc"},
- {file = "tornado-6.5.4-cp39-abi3-win_arm64.whl", hash = "sha256:053e6e16701eb6cbe641f308f4c1a9541f91b6261991160391bfc342e8a551a1"},
- {file = "tornado-6.5.4.tar.gz", hash = "sha256:a22fa9047405d03260b483980635f0b041989d8bcc9a313f8fe18b411d84b1d7"},
+ {file = "tornado-6.5.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:487dc9cc380e29f58c7ab88f9e27cdeef04b2140862e5076a66fb6bb68bb1bfa"},
+ {file = "tornado-6.5.5-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:65a7f1d46d4bb41df1ac99f5fcb685fb25c7e61613742d5108b010975a9a6521"},
+ {file = "tornado-6.5.5-cp39-abi3-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:e74c92e8e65086b338fd56333fb9a68b9f6f2fe7ad532645a290a464bcf46be5"},
+ {file = "tornado-6.5.5-cp39-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:435319e9e340276428bbdb4e7fa732c2d399386d1de5686cb331ec8eee754f07"},
+ {file = "tornado-6.5.5-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:3f54aa540bdbfee7b9eb268ead60e7d199de5021facd276819c193c0fb28ea4e"},
+ {file = "tornado-6.5.5-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:36abed1754faeb80fbd6e64db2758091e1320f6bba74a4cf8c09cd18ccce8aca"},
+ {file = "tornado-6.5.5-cp39-abi3-win32.whl", hash = "sha256:dd3eafaaeec1c7f2f8fdcd5f964e8907ad788fe8a5a32c4426fbbdda621223b7"},
+ {file = "tornado-6.5.5-cp39-abi3-win_amd64.whl", hash = "sha256:6443a794ba961a9f619b1ae926a2e900ac20c34483eea67be4ed8f1e58d3ef7b"},
+ {file = "tornado-6.5.5-cp39-abi3-win_arm64.whl", hash = "sha256:2c9a876e094109333f888539ddb2de4361743e5d21eece20688e3e351e4990a6"},
+ {file = "tornado-6.5.5.tar.gz", hash = "sha256:192b8f3ea91bd7f1f50c06955416ed76c6b72f96779b962f07f911b91e8d30e9"},
]
[[package]]
@@ -4123,11 +4044,10 @@ postgres = ["psycopg2", "psycopg2cffi", "psycopg2cffi-compat"]
redis = ["hiredis", "txredisapi"]
saml2 = ["defusedxml", "pysaml2", "pytz"]
sentry = ["sentry-sdk"]
-systemd = ["systemd-python"]
test = ["idna", "parameterized"]
url-preview = ["lxml"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.10.0,<4.0.0"
-content-hash = "becaf9b6d8948f796a3d0605e92198aa2ec4a0a79cca273efe293befe3cc16d9"
+content-hash = "6e7c3178d9dcf928487b553fd28d2f2c3a679378799ee9c1094eb3168af6fda7"
diff --git a/pyproject.toml b/pyproject.toml
index dacf4b001f..7e1909b708 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "matrix-synapse"
-version = "1.149.1"
+version = "1.150.0"
description = "Homeserver for the Matrix decentralised comms protocol"
readme = "README.rst"
authors = [
@@ -135,10 +135,6 @@ saml2 = [
"pytz>=2018.3", # via pysaml2
]
oidc = ["authlib>=0.15.1"]
-# systemd-python is necessary for logging to the systemd journal via
-# `systemd.journal.JournalHandler`, as is documented in
-# `contrib/systemd/log_config.yaml`.
-systemd = ["systemd-python>=231"]
url-preview = ["lxml>=4.6.3"]
sentry = ["sentry-sdk>=0.7.2"]
opentracing-jaeger = [
@@ -212,7 +208,6 @@ all = [
"opentelemetry-exporter-otlp==1.40.0",
# omitted:
# - test: it's useful to have this separate from dev deps in the olddeps job
- # - systemd: this is a system-based requirement
# Transitive dependencies
# These dependencies aren't directly required by Synapse.
@@ -247,6 +242,9 @@ update_synapse_database = "synapse._scripts.update_synapse_database:main"
[tool.poetry]
packages = [{ include = "synapse" }]
+# We're using PEP 735 dependency groups, which requires Poetry 2.2.0+
+requires-poetry = ">=2.2.0"
+
[tool.poetry.build]
# Compile our rust module when using `poetry install`. This is still required
# while using `poetry` as the build frontend. Saves the developer from needing
@@ -274,55 +272,55 @@ generate-setup-file = true
# Dependencies used for developing Synapse itself.
#
-# Hold off on migrating these to `dev-dependencies` (PEP 735) for now until
-# Poetry 2.2.0+, pip 25.1+ are more widely available.
-[tool.poetry.group.dev.dependencies]
# We pin development dependencies in poetry.lock so that our tests don't start
# failing on new releases. Keeping lower bounds loose here means that dependabot
# can bump versions without having to update the content-hash in the lockfile.
# This helps prevents merge conflicts when running a batch of dependabot updates.
-ruff = "0.14.6"
-
-# Typechecking
-lxml-stubs = ">=0.4.0"
-mypy = "*"
-mypy-zope = "*"
-types-bleach = ">=4.1.0"
-types-jsonschema = ">=3.2.0"
-types-netaddr = ">=0.8.0.6"
-types-opentracing = ">=2.4.2"
-types-Pillow = ">=8.3.4"
-types-psycopg2 = ">=2.9.9"
-types-pyOpenSSL = ">=20.0.7"
-types-PyYAML = ">=5.4.10"
-types-requests = ">=2.26.0"
-types-setuptools = ">=57.4.0"
-
-# Dependencies which are exclusively required by unit test code. This is
-# NOT a list of all modules that are necessary to run the unit tests.
-# Tests assume that all optional dependencies are installed.
-#
-# If this is updated, don't forget to update the equivalent lines in
-# project.optional-dependencies.test.
-parameterized = ">=0.9.0"
-idna = ">=3.3"
-
-# The following are used by the release script
-click = ">=8.1.3"
-# GitPython was == 3.1.14; bumped to 3.1.20, the first release with type hints.
-GitPython = ">=3.1.20"
-markdown-it-py = ">=3.0.0"
-pygithub = ">=1.59"
-# The following are executed as commands by the release script.
-twine = "*"
-# Towncrier min version comes from https://github.com/matrix-org/synapse/pull/3425. Rationale unclear.
-towncrier = ">=18.6.0rc1"
-
-# Used for checking the Poetry lockfile
-tomli = ">=1.2.3"
-
-# Used for checking the schema delta files
-sqlglot = ">=28.0.0"
+[dependency-groups]
+dev = [
+ "ruff==0.14.6",
+
+ # Typechecking
+ "lxml-stubs>=0.4.0",
+ "mypy",
+ "mypy-zope",
+ "types-bleach>=4.1.0",
+ "types-jsonschema>=3.2.0",
+ "types-netaddr>=0.8.0.6",
+ "types-opentracing>=2.4.2",
+ "types-Pillow>=8.3.4",
+ "types-psycopg2>=2.9.9",
+ "types-pyOpenSSL>=20.0.7",
+ "types-PyYAML>=5.4.10",
+ "types-requests>=2.26.0",
+ "types-setuptools>=57.4.0",
+
+ # Dependencies which are exclusively required by unit test code. This is
+ # NOT a list of all modules that are necessary to run the unit tests.
+ # Tests assume that all optional dependencies are installed.
+ #
+ # If this is updated, don't forget to update the equivalent lines in
+ # project.optional-dependencies.test.
+ "parameterized>=0.9.0",
+ "idna>=3.3",
+
+ # The following are used by the release script
+ "click>=8.1.3",
+ # GitPython was == 3.1.14; bumped to 3.1.20, the first release with type hints.
+ "GitPython>=3.1.20",
+ "markdown-it-py>=3.0.0",
+ "pygithub>=1.59",
+ # The following are executed as commands by the release script.
+ "twine",
+ # Towncrier min version comes from https://github.com/matrix-org/synapse/pull/3425. Rationale unclear.
+ "towncrier>=18.6.0rc1",
+
+ # Used for checking the Poetry lockfile
+ "tomli>=1.2.3",
+
+ # Used for checking the schema delta files
+ "sqlglot>=28.0.0",
+]
[tool.towncrier]
package = "synapse"
diff --git a/rust/src/events/internal_metadata.rs b/rust/src/events/internal_metadata.rs
index 4bf2252d8f..0233623e80 100644
--- a/rust/src/events/internal_metadata.rs
+++ b/rust/src/events/internal_metadata.rs
@@ -57,6 +57,7 @@ enum EventInternalMetadataData {
PolicyServerSpammy(bool),
Redacted(bool),
TxnId(Box<str>),
+ DelayId(Box<str>),
TokenId(i64),
DeviceId(Box<str>),
}
@@ -115,6 +116,10 @@ impl EventInternalMetadataData {
pyo3::intern!(py, "txn_id"),
o.into_pyobject(py).unwrap_infallible().into_any(),
),
+ EventInternalMetadataData::DelayId(o) => (
+ pyo3::intern!(py, "delay_id"),
+ o.into_pyobject(py).unwrap_infallible().into_any(),
+ ),
EventInternalMetadataData::TokenId(o) => (
pyo3::intern!(py, "token_id"),
o.into_pyobject(py).unwrap_infallible().into_any(),
@@ -179,6 +184,12 @@ impl EventInternalMetadataData {
.map(String::into_boxed_str)
.with_context(|| format!("'{key_str}' has invalid type"))?,
),
+ "delay_id" => EventInternalMetadataData::DelayId(
+ value
+ .extract()
+ .map(String::into_boxed_str)
+ .with_context(|| format!("'{key_str}' has invalid type"))?,
+ ),
"token_id" => EventInternalMetadataData::TokenId(
value
.extract()
@@ -472,6 +483,17 @@ impl EventInternalMetadata {
set_property!(self, TxnId, obj.into_boxed_str());
}
+ /// The delay ID, set only if the event was a delayed event.
+ #[getter]
+ fn get_delay_id(&self) -> PyResult<&str> {
+ let s = get_property!(self, DelayId)?;
+ Ok(s)
+ }
+ #[setter]
+ fn set_delay_id(&mut self, obj: String) {
+ set_property!(self, DelayId, obj.into_boxed_str());
+ }
+
/// The access token ID of the user who sent this event, if any.
#[getter]
fn get_token_id(&self) -> PyResult<i64> {
diff --git a/schema/synapse-config.schema.yaml b/schema/synapse-config.schema.yaml
index 5ed9114550..f8194ad726 100644
--- a/schema/synapse-config.schema.yaml
+++ b/schema/synapse-config.schema.yaml
@@ -1,5 +1,5 @@
$schema: https://famedly.github.io/synapse/latest/schema/v1/meta.schema.json
-$id: https://famedly.github.io/synapse/schema/synapse/v1.149.1/synapse-config.schema.json
+$id: https://famedly.github.io/synapse/schema/synapse/v1.150.0/synapse-config.schema.json
type: object
properties:
famedly_maximum_refresh_token_lifetime:
@@ -5544,11 +5544,13 @@ properties:
outbound_federation_restricted_to:
type: array
description: >-
- When using workers, you can restrict outbound federation traffic to only
- go through a specific subset of workers. Any worker specified here must
- also be in the [`instance_map`](#instance_map).
- [`worker_replication_secret`](#worker_replication_secret) must also be
- configured to authorize inter-worker communication.
+ You can restrict outbound federation traffic to only go through a specific subset
+ of workers including the [Secure Border Gateway
+ (SBG)](https://element.io/en/server-suite/secure-border-gateways). Any worker
+ specified here (including the SBG) must also be in the
+ [`instance_map`](#instance_map).
+ [`worker_replication_secret`](#worker_replication_secret) must also be configured
+ to authorize inter-worker communication.
Also see the [worker
diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh
index fec005fdb1..a8a361fd4a 100755
--- a/scripts-dev/complement.sh
+++ b/scripts-dev/complement.sh
@@ -38,17 +38,22 @@ set -e
# Tag local builds with a dummy registry namespace so that later builds may reference
# them exactly instead of accidentally pulling from a remote registry.
#
-# This is important as some storage drivers/types prefer remote images over local
-# (`containerd`) which causes problems as we're testing against some remote image that
-# doesn't include all of the changes that we're trying to test (be it locally or in a PR
-# in CI). This is spawning from a real-world problem where the GitHub runners were
+# This is important as some Docker storage drivers/types prefer remote images over local
+# (like `containerd`) which causes problems as we're testing against some remote image
+# that doesn't include all of the changes that we're trying to test (be it locally or in
+# a PR in CI). This is spawning from a real-world problem where the GitHub runners were
# updated to use Docker Engine 29.0.0+ which uses `containerd` by default for new
# installations.
+#
+# XXX: If the Docker image name changes, don't forget to update
+# `.github/workflows/push_complement_image.yml` as well
LOCAL_IMAGE_NAMESPACE=localhost
# The image tags for how these images will be stored in the registry
SYNAPSE_IMAGE_PATH="$LOCAL_IMAGE_NAMESPACE/synapse"
SYNAPSE_WORKERS_IMAGE_PATH="$LOCAL_IMAGE_NAMESPACE/synapse-workers"
+# XXX: If the Docker image name changes, don't forget to update
+# `.github/workflows/push_complement_image.yml` as well
COMPLEMENT_SYNAPSE_IMAGE_PATH="$LOCAL_IMAGE_NAMESPACE/complement-synapse"
SYNAPSE_EDITABLE_IMAGE_PATH="$LOCAL_IMAGE_NAMESPACE/synapse-editable"
@@ -215,11 +220,17 @@ main() {
$CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' "$COMPLEMENT_SYNAPSE_EDITABLE_IMAGE_PATH" -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so
else
+ # We remove the `egg-info` as it can contain outdated information which won't line
+ # up with our current reality.
+ rm -rf matrix_synapse.egg-info/
+ # Figure out the Synapse version string in our current checkout
+ synapse_version_string="$(poetry run python -c 'from synapse.util import SYNAPSE_VERSION; print(SYNAPSE_VERSION)')"
# Build the base Synapse image from the local checkout
echo_if_github "::group::Build Docker image: matrixdotorg/synapse"
$CONTAINER_RUNTIME build \
-t "$SYNAPSE_IMAGE_PATH" \
+ --build-arg SYNAPSE_VERSION_STRING="$synapse_version_string" \
--build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \
--build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \
-f "docker/Dockerfile" .
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index 30ff57a925..b31c0baebf 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -3,6 +3,7 @@
#
# Copyright 2021 The Matrix.org Foundation C.I.C.
# Copyright (C) 2023 New Vector, Ltd
+# Copyright (C) 2025 Element Creations Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
@@ -520,6 +521,10 @@ def read_config(
"msc4108_delegation_endpoint", None
)
+ # MSC4370: Get extremities federation endpoint
+ # See https://github.com/element-hq/synapse/issues/19524
+ self.msc4370_enabled = experimental.get("msc4370_enabled", False)
+
auth_delegated = self.msc3861.enabled or (
config.get("matrix_authentication_service") or {}
).get("enabled", False)
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index b79a68f589..89eb2182af 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -48,7 +48,7 @@
from synapse.logging.opentracing import SynapseTags, set_tag, trace
from synapse.types import JsonDict, Requester
-from . import EventBase, StrippedStateEvent, make_event_from_dict
+from . import EventBase, FrozenEventV2, StrippedStateEvent, make_event_from_dict
if TYPE_CHECKING:
from synapse.handlers.relations import BundledAggregations
@@ -109,6 +109,14 @@ def clone_event(event: EventBase) -> EventBase:
event.get_dict(), event.room_version, event.internal_metadata.get_dict()
)
+ # Starting with FrozenEventV2, the event ID is an (expensive) hash of the event. This
+ # is lazily computed when we get the FrozenEventV2.event_id property, then cached in
+ # the _event_id field. Later FrozenEvent formats all inherit from FrozenEventV2, so we
+ # can use the same logic here.
+ if isinstance(event, FrozenEventV2) and isinstance(new_event, FrozenEventV2):
+ # If we already pre-computed the event ID, use it.
+ new_event._event_id = event._event_id
+
# Copy the bits of `internal_metadata` that aren't returned by `get_dict`.
new_event.internal_metadata.stream_ordering = (
event.internal_metadata.stream_ordering
@@ -412,7 +420,7 @@ class SerializeEventConfig:
# Function to convert from federation format to client format
event_format: Callable[[JsonDict], JsonDict] = format_event_for_client_v1
# The entity that requested the event. This is used to determine whether to include
- # the transaction_id in the unsigned section of the event.
+ # the transaction_id and delay_id in the unsigned section of the event.
requester: Requester | None = None
# List of event fields to include. If empty, all fields will be returned.
only_event_fields: list[str] | None = None
@@ -475,44 +483,49 @@ def serialize_event(
config=config,
)
- # If we have a txn_id saved in the internal_metadata, we should include it in the
- # unsigned section of the event if it was sent by the same session as the one
- # requesting the event.
- txn_id: str | None = getattr(e.internal_metadata, "txn_id", None)
- if (
- txn_id is not None
- and config.requester is not None
- and config.requester.user.to_string() == e.sender
- ):
- # Some events do not have the device ID stored in the internal metadata,
- # this includes old events as well as those created by appservice, guests,
- # or with tokens minted with the admin API. For those events, fallback
- # to using the access token instead.
- event_device_id: str | None = getattr(e.internal_metadata, "device_id", None)
- if event_device_id is not None:
- if event_device_id == config.requester.device_id:
- d["unsigned"]["transaction_id"] = txn_id
-
- else:
- # Fallback behaviour: only include the transaction ID if the event
- # was sent from the same access token.
- #
- # For regular users, the access token ID can be used to determine this.
- # This includes access tokens minted with the admin API.
- #
- # For guests and appservice users, we can't check the access token ID
- # so assume it is the same session.
- event_token_id: int | None = getattr(e.internal_metadata, "token_id", None)
- if (
- (
- event_token_id is not None
- and config.requester.access_token_id is not None
- and event_token_id == config.requester.access_token_id
+ # If we have applicable fields saved in the internal_metadata, include them in the
+ # unsigned section of the event if the event was sent by the same session (or when
+ # appropriate, just the same sender) as the one requesting the event.
+ if config.requester is not None and config.requester.user.to_string() == e.sender:
+ txn_id: str | None = getattr(e.internal_metadata, "txn_id", None)
+ if txn_id is not None:
+ # Some events do not have the device ID stored in the internal metadata,
+ # this includes old events as well as those created by appservice, guests,
+ # or with tokens minted with the admin API. For those events, fallback
+ # to using the access token instead.
+ event_device_id: str | None = getattr(
+ e.internal_metadata, "device_id", None
+ )
+ if event_device_id is not None:
+ if event_device_id == config.requester.device_id:
+ d["unsigned"]["transaction_id"] = txn_id
+
+ else:
+ # Fallback behaviour: only include the transaction ID if the event
+ # was sent from the same access token.
+ #
+ # For regular users, the access token ID can be used to determine this.
+ # This includes access tokens minted with the admin API.
+ #
+ # For guests and appservice users, we can't check the access token ID
+ # so assume it is the same session.
+ event_token_id: int | None = getattr(
+ e.internal_metadata, "token_id", None
)
- or config.requester.is_guest
- or config.requester.app_service
- ):
- d["unsigned"]["transaction_id"] = txn_id
+ if (
+ (
+ event_token_id is not None
+ and config.requester.access_token_id is not None
+ and event_token_id == config.requester.access_token_id
+ )
+ or config.requester.is_guest
+ or config.requester.app_service
+ ):
+ d["unsigned"]["transaction_id"] = txn_id
+
+ delay_id: str | None = getattr(e.internal_metadata, "delay_id", None)
+ if delay_id is not None:
+ d["unsigned"]["org.matrix.msc4140.delay_id"] = delay_id
# invite_room_state and knock_room_state are a list of stripped room state events
# that are meant to provide metadata about a room to an invitee/knocker. They are
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index a662fa35cb..a869f231f3 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -4,6 +4,7 @@
# Copyright 2019-2021 Matrix.org Federation C.I.C
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright (C) 2023 New Vector, Ltd
+# Copyright (C) 2025 Element Creations Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
@@ -683,6 +684,18 @@ async def on_query_request(
resp = await self.registry.on_query(query_type, args)
return 200, resp
+ async def on_get_extremities_request(self, origin: str, room_id: str) -> JsonDict:
+ # Assert host in room first to hide contents of the ACL from the caller
+ await self._event_auth_handler.assert_host_in_room(room_id, origin)
+ origin_host, _ = parse_server_name(origin)
+ await self.check_server_matches_acl(origin_host, room_id)
+
+ extremities = await self.store.get_forward_extremities_for_room(room_id)
+ prev_event_ids = [event_id for event_id, _, _, _ in extremities]
+ if len(prev_event_ids) == 0:
+ raise SynapseError(500, "Room has no forward extremities")
+ return {"prev_events": prev_event_ids}
+
async def on_make_join_request(
self, origin: str, room_id: str, user_id: str, supported_versions: list[str]
) -> dict[str, Any]:
diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py
index 6d92d00523..0eff49cf73 100644
--- a/synapse/federation/transport/server/__init__.py
+++ b/synapse/federation/transport/server/__init__.py
@@ -4,6 +4,7 @@
# Copyright 2020 Sorunome
# Copyright 2014-2021 The Matrix.org Foundation C.I.C.
# Copyright (C) 2023 New Vector, Ltd
+# Copyright (C) 2025 Element Creations Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
@@ -33,6 +34,7 @@
FederationMediaDownloadServlet,
FederationMediaThumbnailServlet,
FederationUnstableClientKeysClaimServlet,
+ FederationUnstableGetExtremitiesServlet,
)
from synapse.http.server import HttpServer, JsonResource
from synapse.http.servlet import (
@@ -326,6 +328,12 @@ def register_servlets(
if not hs.config.media.can_load_media_repo:
continue
+ if (
+ servletclass == FederationUnstableGetExtremitiesServlet
+ and not hs.config.experimental.msc4370_enabled
+ ):
+ continue
+
servletclass(
hs=hs,
authenticator=authenticator,
diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py
index a7c297c0b7..d783e6da51 100644
--- a/synapse/federation/transport/server/federation.py
+++ b/synapse/federation/transport/server/federation.py
@@ -1,8 +1,9 @@
#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
-# Copyright 2021 The Matrix.org Foundation C.I.C.
+# Copyright 2021 The Matrix.org Foundation C.I.C.
# Copyright (C) 2023 New Vector, Ltd
+# Copyright (C) 2025 Element Creations Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
@@ -273,6 +274,22 @@ async def on_GET(
return await self.handler.on_query_request(query_type, args)
+class FederationUnstableGetExtremitiesServlet(BaseFederationServerServlet):
+ PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc4370"
+ PATH = "/extremities/(?P<room_id>[^/]*)"
+ CATEGORY = "Federation requests"
+
+ async def on_GET(
+ self,
+ origin: str,
+ content: Literal[None],
+ query: dict[bytes, list[bytes]],
+ room_id: str,
+ ) -> tuple[int, JsonDict]:
+ result = await self.handler.on_get_extremities_request(origin, room_id)
+ return 200, result
+
+
class FederationMakeJoinServlet(BaseFederationServerServlet):
PATH = "/make_join/(?P<room_id>[^/]*)/(?P<user_id>[^/]*)"
CATEGORY = "Federation requests"
@@ -884,6 +901,7 @@ async def on_GET(
FederationBackfillServlet,
FederationTimestampLookupServlet,
FederationQueryServlet,
+ FederationUnstableGetExtremitiesServlet,
FederationMakeJoinServlet,
FederationMakeLeaveServlet,
FederationEventServlet,
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index e4c646ce87..538bdaaaf8 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -167,13 +167,13 @@ async def deactivate_account(
# Mark the user as erased, if they asked for that
if erase_data:
user = UserID.from_string(user_id)
- # Remove avatar URL from this user
- await self._profile_handler.set_avatar_url(
- user, requester, "", by_admin, deactivation=True
- )
- # Remove displayname from this user
- await self._profile_handler.set_displayname(
- user, requester, "", by_admin, deactivation=True
+ # Remove displayname, avatar URL and custom profile fields from this user
+ #
+ # Note that displayname and avatar URL may persist as historical state events
+ # in rooms, but these cases behave like message history, following
+ # https://spec.matrix.org/v1.17/client-server-api/#post_matrixclientv3accountdeactivate
+ await self._profile_handler.delete_profile_upon_deactivation(
+ user, requester, by_admin
)
logger.info("Marking %s as erased", user_id)
diff --git a/synapse/handlers/delayed_events.py b/synapse/handlers/delayed_events.py
index 7e41716f1e..4a9f646d4d 100644
--- a/synapse/handlers/delayed_events.py
+++ b/synapse/handlers/delayed_events.py
@@ -560,6 +560,7 @@ async def _send_event(
action=membership,
content=event.content,
origin_server_ts=event.origin_server_ts,
+ delay_id=event.delay_id,
)
else:
event_dict: JsonDict = {
@@ -585,6 +586,7 @@ async def _send_event(
requester,
event_dict,
txn_id=txn_id,
+ delay_id=event.delay_id,
)
event_id = sent_event.event_id
except ShadowBanError:
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 76aa3c7c32..fc5dceb80b 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -585,6 +585,7 @@ async def create_event(
state_map: StateMap[str] | None = None,
for_batch: bool = False,
current_state_group: int | None = None,
+ delay_id: str | None = None,
) -> tuple[EventBase, UnpersistedEventContextBase]:
"""
Given a dict from a client, create a new event. If bool for_batch is true, will
@@ -600,7 +601,7 @@ async def create_event(
Args:
requester
event_dict: An entire event
- txn_id
+ txn_id: The transaction ID.
prev_event_ids:
the forward extremities to use as the prev_events for the
new event.
@@ -639,6 +640,8 @@ async def create_event(
current_state_group: the current state group, used only for creating events for
batch persisting
+ delay_id: The delay ID of this event, if it was a delayed event.
+
Raises:
ResourceLimitError if server is blocked to some resource being
exceeded
@@ -726,6 +729,9 @@ async def create_event(
if txn_id is not None:
builder.internal_metadata.txn_id = txn_id
+ if delay_id is not None:
+ builder.internal_metadata.delay_id = delay_id
+
builder.internal_metadata.outlier = outlier
event, unpersisted_context = await self.create_new_client_event(
@@ -966,6 +972,7 @@ async def create_and_send_nonmember_event(
ignore_shadow_ban: bool = False,
outlier: bool = False,
depth: int | None = None,
+ delay_id: str | None = None,
) -> tuple[EventBase, int]:
"""
Creates an event, then sends it.
@@ -994,6 +1001,7 @@ async def create_and_send_nonmember_event(
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
+ delay_id: The delay ID of this event, if it was a delayed event.
Returns:
The event, and its stream ordering (if deduplication happened,
@@ -1090,6 +1098,7 @@ async def create_and_send_nonmember_event(
ignore_shadow_ban=ignore_shadow_ban,
outlier=outlier,
depth=depth,
+ delay_id=delay_id,
)
async def _create_and_send_nonmember_event_locked(
@@ -1103,6 +1112,7 @@ async def _create_and_send_nonmember_event_locked(
ignore_shadow_ban: bool = False,
outlier: bool = False,
depth: int | None = None,
+ delay_id: str | None = None,
) -> tuple[EventBase, int]:
room_id = event_dict["room_id"]
@@ -1131,6 +1141,7 @@ async def _create_and_send_nonmember_event_locked(
state_event_ids=state_event_ids,
outlier=outlier,
depth=depth,
+ delay_id=delay_id,
)
context = await unpersisted_context.persist(event)
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 6ecf04487e..a52b9f8ecf 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -20,8 +20,11 @@
#
import logging
import random
+from bisect import bisect_right
from typing import TYPE_CHECKING
+from twisted.internet.defer import CancelledError
+
from synapse.api.constants import ProfileFields
from synapse.api.errors import (
AuthError,
@@ -32,7 +35,17 @@
SynapseError,
)
from synapse.storage.databases.main.media_repository import LocalMedia, RemoteMedia
-from synapse.types import JsonDict, JsonValue, Requester, UserID, create_requester
+from synapse.storage.roommember import ProfileInfo
+from synapse.types import (
+ JsonDict,
+ JsonMapping,
+ JsonValue,
+ Requester,
+ ScheduledTask,
+ TaskStatus,
+ UserID,
+ create_requester,
+)
from synapse.util.caches.descriptors import cached
from synapse.util.duration import Duration
from synapse.util.stringutils import parse_and_validate_mxc_uri
@@ -46,6 +59,8 @@
MAX_AVATAR_URL_LEN = 1000
# Field name length is specced at 255 bytes.
MAX_CUSTOM_FIELD_LEN = 255
+UPDATE_JOIN_STATES_ACTION_NAME = "update_join_states"
+UPDATE_JOIN_STATES_LOCK_NAME = "update_join_states_lock"
class ProfileHandler:
@@ -78,6 +93,12 @@ def __init__(self, hs: "HomeServer"):
self._third_party_rules = hs.get_module_api_callbacks().third_party_event_rules
+ self._task_scheduler = hs.get_task_scheduler()
+ self._task_scheduler.register_action(
+ self._update_join_states_task, UPDATE_JOIN_STATES_ACTION_NAME
+ )
+ self._worker_locks = hs.get_worker_locks_handler()
+
async def get_profile(self, user_id: str, ignore_backoff: bool = True) -> JsonDict:
"""
Get a user's profile as a JSON dictionary.
@@ -173,18 +194,24 @@ async def set_displayname(
target_user: UserID,
requester: Requester,
new_displayname: str,
+ *,
by_admin: bool = False,
- deactivation: bool = False,
propagate: bool = True,
) -> None:
"""Set the displayname of a user
+ Preconditions:
+ - This must NOT be called as part of deactivating the user, because we will
+ notify modules about the change whilst claiming it is not related
+ to user deactivation and we will also (if `propagate=True`) send
+ updates into rooms, which could cause rooms to be accidentally joined
+ after the deactivated user has left them.
+
Args:
target_user: the user whose displayname is to be changed.
requester: The user attempting to make this change.
new_displayname: The displayname to give this user.
by_admin: Whether this change was made by an administrator.
- deactivation: Whether this change was made while deactivating the user.
propagate: Whether this change also applies to the user's membership events.
"""
if not self.hs.is_mine(target_user):
@@ -230,12 +257,13 @@ async def set_displayname(
await self.store.set_profile_displayname(target_user, displayname_to_set)
profile = await self.store.get_profileinfo(target_user)
+
await self.user_directory_handler.handle_local_profile_change(
target_user.to_string(), profile
)
await self._third_party_rules.on_profile_update(
- target_user.to_string(), profile, by_admin, deactivation
+ target_user.to_string(), profile, by_admin, deactivation=False
)
if propagate:
@@ -279,18 +307,24 @@ async def set_avatar_url(
target_user: UserID,
requester: Requester,
new_avatar_url: str,
+ *,
by_admin: bool = False,
- deactivation: bool = False,
propagate: bool = True,
) -> None:
"""Set a new avatar URL for a user.
+ Preconditions:
+ - This must NOT be called as part of deactivating the user, because we will
+ notify modules about the change whilst claiming it is not related
+ to user deactivation and we will also (if `propagate=True`) send
+ updates into rooms, which could cause rooms to be accidentally joined
+ after the deactivated user has left them.
+
Args:
target_user: the user whose avatar URL is to be changed.
requester: The user attempting to make this change.
new_avatar_url: The avatar URL to give this user.
by_admin: Whether this change was made by an administrator.
- deactivation: Whether this change was made while deactivating the user.
propagate: Whether this change also applies to the user's membership events.
"""
if not self.hs.is_mine(target_user):
@@ -334,17 +368,79 @@ async def set_avatar_url(
await self.store.set_profile_avatar_url(target_user, avatar_url_to_set)
profile = await self.store.get_profileinfo(target_user)
+
await self.user_directory_handler.handle_local_profile_change(
target_user.to_string(), profile
)
await self._third_party_rules.on_profile_update(
- target_user.to_string(), profile, by_admin, deactivation
+ target_user.to_string(), profile, by_admin, deactivation=False
)
if propagate:
await self._update_join_states(requester, target_user)
+ async def delete_profile_upon_deactivation(
+ self,
+ target_user: UserID,
+ requester: Requester,
+ by_admin: bool = False,
+ ) -> None:
+ """
+ Clear the user's profile upon user deactivation (specifically, when user erasure is needed).
+
+ This includes the displayname, avatar_url, all custom profile fields.
+
+ The user directory is NOT updated in any way; it is the caller's responsibility to remove
+ the user from the user directory.
+
+ Rooms' join states are NOT updated in any way; it is the caller's responsibility to soon
+ **leave** the room on the user's behalf, so there's no point sending new join events into
+ rooms to propagate the profile deletion.
+ See the `users_pending_deactivation` table and the associated user parter loop.
+ """
+ if not self.hs.is_mine(target_user):
+ raise SynapseError(400, "User is not hosted on this homeserver")
+
+ # Prevent users from deactivating anyone but themselves,
+ # except for admins who can deactivate anyone.
+ if not by_admin and target_user != requester.user:
+ # It's a little strange to have this check here, but given all the sibling
+ # methods have these checks, it'd be even stranger to be inconsistent and not
+ # have it.
+ raise AuthError(400, "Cannot remove another user's profile")
+
+ if not by_admin:
+ current_profile = await self.store.get_profileinfo(target_user)
+ if not self.hs.config.registration.enable_set_displayname:
+ if current_profile.display_name:
+ # SUSPICIOUS: It seems strange to block deactivation on this,
+ # though this is preserving previous behaviour.
+ raise SynapseError(
+ 400,
+ "Changing display name is disabled on this server",
+ Codes.FORBIDDEN,
+ )
+
+ if not self.hs.config.registration.enable_set_avatar_url:
+ if current_profile.avatar_url:
+ # SUSPICIOUS: It seems strange to block deactivation on this,
+ # though this is preserving previous behaviour.
+ raise SynapseError(
+ 400,
+ "Changing avatar is disabled on this server",
+ Codes.FORBIDDEN,
+ )
+
+ await self.store.delete_profile(target_user)
+
+ await self._third_party_rules.on_profile_update(
+ target_user.to_string(),
+ ProfileInfo(None, None),
+ by_admin,
+ deactivation=True,
+ )
+
@cached()
async def check_avatar_size_and_mime_type(self, mxc: str) -> bool:
"""Check that the size and content type of the avatar at the given MXC URI are
@@ -466,18 +562,22 @@ async def set_profile_field(
requester: Requester,
field_name: str,
new_value: JsonValue,
+ *,
by_admin: bool = False,
- deactivation: bool = False,
) -> None:
"""Set a new profile field for a user.
+ Preconditions:
+ - This must NOT be called as part of deactivating the user, because we will
+ notify modules about the change whilst claiming it is not related
+ to user deactivation.
+
Args:
target_user: the user whose profile is to be changed.
requester: The user attempting to make this change.
field_name: The name of the profile field to update.
new_value: The new field value for this user.
by_admin: Whether this change was made by an administrator.
- deactivation: Whether this change was made while deactivating the user.
"""
if not self.hs.is_mine(target_user):
raise SynapseError(400, "User is not hosted on this homeserver")
@@ -490,7 +590,7 @@ async def set_profile_field(
# Custom fields do not propagate into the user directory *or* rooms.
profile = await self.store.get_profileinfo(target_user)
await self._third_party_rules.on_profile_update(
- target_user.to_string(), profile, by_admin, deactivation
+ target_user.to_string(), profile, by_admin, deactivation=False
)
async def delete_profile_field(
@@ -498,17 +598,21 @@ async def delete_profile_field(
target_user: UserID,
requester: Requester,
field_name: str,
+ *,
by_admin: bool = False,
- deactivation: bool = False,
) -> None:
"""Delete a field from a user's profile.
+ Preconditions:
+ - This must NOT be called as part of deactivating the user, because we will
+ notify modules about the change whilst claiming it is not related
+ to user deactivation.
+
Args:
target_user: the user whose profile is to be changed.
requester: The user attempting to make this change.
field_name: The name of the profile field to remove.
by_admin: Whether this change was made by an administrator.
- deactivation: Whether this change was made while deactivating the user.
"""
if not self.hs.is_mine(target_user):
raise SynapseError(400, "User is not hosted on this homeserver")
@@ -521,7 +625,7 @@ async def delete_profile_field(
# Custom fields do not propagate into the user directory *or* rooms.
profile = await self.store.get_profileinfo(target_user)
await self._third_party_rules.on_profile_update(
- target_user.to_string(), profile, by_admin, deactivation
+ target_user.to_string(), profile, by_admin, deactivation=False
)
async def on_profile_query(self, args: JsonDict) -> JsonDict:
@@ -591,7 +695,53 @@ async def _update_join_states(
await self.clock.sleep(Duration(seconds=random.randint(1, 10)))
return
- room_ids = await self.store.get_rooms_for_user(target_user.to_string())
+ target_user_str = target_user.to_string()
+
+ # Cancel any ongoing profile membership updates for this user,
+ # and start a new one.
+ async with self._worker_locks.acquire_lock(
+ UPDATE_JOIN_STATES_LOCK_NAME,
+ target_user_str,
+ ):
+ tasks_to_cancel = await self._task_scheduler.get_tasks(
+ actions=[UPDATE_JOIN_STATES_ACTION_NAME],
+ resource_id=target_user_str,
+ statuses=[TaskStatus.ACTIVE, TaskStatus.SCHEDULED],
+ )
+ assert len(tasks_to_cancel) <= 1, "Expected at most one task to cancel"
+ for task in tasks_to_cancel:
+ await self._task_scheduler.cancel_task(task.id)
+
+ await self._task_scheduler.schedule_task(
+ UPDATE_JOIN_STATES_ACTION_NAME,
+ resource_id=target_user_str,
+ params={
+ "requester_authenticated_entity": requester.authenticated_entity,
+ },
+ )
+
+ async def _update_join_states_task(
+ self,
+ task: ScheduledTask,
+ ) -> tuple[TaskStatus, JsonMapping | None, str | None]:
+ assert task.resource_id
+ assert task.params
+
+ target_user = UserID.from_string(task.resource_id)
+ room_ids = sorted(await self.store.get_rooms_for_user(target_user.to_string()))
+
+ last_room_id = task.result.get("last_room_id", None) if task.result else None
+
+ if last_room_id:
+ # Filter out room IDs that have already been handled
+ # by finding the first room ID greater than the last handled room ID
+ # and slicing the list from that point onwards.
+ room_ids = room_ids[bisect_right(room_ids, last_room_id) :]
+
+ requester = create_requester(
+ user_id=target_user,
+ authenticated_entity=task.params.get("requester_authenticated_entity"),
+ )
for room_id in room_ids:
handler = self.hs.get_room_member_handler()
@@ -605,10 +755,17 @@ async def _update_join_states(
"join", # We treat a profile update like a join.
ratelimit=False, # Try to hide that these events aren't atomic.
)
+ except CancelledError as e:
+ raise e
except Exception as e:
logger.warning(
"Failed to update join event for room %s - %s", room_id, str(e)
)
+ await self._task_scheduler.update_task(
+ task.id, result={"last_room_id": room_id}
+ )
+
+ return TaskStatus.COMPLETE, None, None
async def check_profile_query_allowed(
self, target_user: UserID, requester: UserID | None = None
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 0c6be72716..b2e678e90e 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -408,6 +408,7 @@ async def _local_membership_update(
require_consent: bool = True,
outlier: bool = False,
origin_server_ts: int | None = None,
+ delay_id: str | None = None,
) -> tuple[str, int]:
"""
Internal membership update function to get an existing event or create
@@ -440,6 +441,7 @@ async def _local_membership_update(
opposed to being inline with the current DAG.
origin_server_ts: The origin_server_ts to use if a new event is created. Uses
the current timestamp if set to None.
+ delay_id: The delay ID of this event, if it was a delayed event.
Returns:
Tuple of event ID and stream ordering position
@@ -492,6 +494,7 @@ async def _local_membership_update(
depth=depth,
require_consent=require_consent,
outlier=outlier,
+ delay_id=delay_id,
)
context = await unpersisted_context.persist(event)
prev_state_ids = await context.get_prev_state_ids(
@@ -587,6 +590,7 @@ async def update_membership(
state_event_ids: list[str] | None = None,
depth: int | None = None,
origin_server_ts: int | None = None,
+ delay_id: str | None = None,
) -> tuple[str, int]:
"""Update a user's membership in a room.
@@ -617,6 +621,7 @@ async def update_membership(
based on the prev_events.
origin_server_ts: The origin_server_ts to use if a new event is created. Uses
the current timestamp if set to None.
+ delay_id: The delay ID of this event, if it was a delayed event.
Returns:
A tuple of the new event ID and stream ID.
@@ -679,6 +684,7 @@ async def update_membership(
state_event_ids=state_event_ids,
depth=depth,
origin_server_ts=origin_server_ts,
+ delay_id=delay_id,
)
return result
@@ -701,6 +707,7 @@ async def update_membership_locked(
state_event_ids: list[str] | None = None,
depth: int | None = None,
origin_server_ts: int | None = None,
+ delay_id: str | None = None,
) -> tuple[str, int]:
"""Helper for update_membership.
@@ -733,6 +740,7 @@ async def update_membership_locked(
based on the prev_events.
origin_server_ts: The origin_server_ts to use if a new event is created. Uses
the current timestamp if set to None.
+ delay_id: The delay ID of this event, if it was a delayed event.
Returns:
A tuple of the new event ID and stream ID.
@@ -943,6 +951,7 @@ async def update_membership_locked(
require_consent=require_consent,
outlier=outlier,
origin_server_ts=origin_server_ts,
+ delay_id=delay_id,
)
latest_event_ids = await self.store.get_prev_events_for_room(room_id)
@@ -1201,6 +1210,7 @@ async def update_membership_locked(
require_consent=require_consent,
outlier=outlier,
origin_server_ts=origin_server_ts,
+ delay_id=delay_id,
)
async def check_for_any_membership_in_room(
diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py
index ebbe7afa84..0b2587f94f 100644
--- a/synapse/handlers/sso.py
+++ b/synapse/handlers/sso.py
@@ -522,7 +522,10 @@ async def complete_sso_login_request(
authenticated_entity=user_id,
)
await self._profile_handler.set_displayname(
- user_id_obj, requester, attributes.display_name, True
+ user_id_obj,
+ requester,
+ attributes.display_name,
+ by_admin=True,
)
if attributes.picture:
await self.set_avatar(user_id, attributes.picture)
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index a32748c2a9..c8ef5e2aa6 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -37,6 +37,7 @@
EventContentFields,
EventTypes,
Membership,
+ StickyEvent,
)
from synapse.api.filtering import FilterCollection
from synapse.api.presence import UserPresenceState
@@ -147,6 +148,7 @@ class JoinedSyncResult:
state: StateMap[EventBase]
ephemeral: list[JsonDict]
account_data: list[JsonDict]
+ sticky: list[EventBase]
unread_notifications: JsonDict
unread_thread_notifications: JsonDict
summary: JsonDict | None
@@ -157,7 +159,11 @@ def __bool__(self) -> bool:
to tell if room needs to be part of the sync result.
"""
return bool(
- self.timeline or self.state or self.ephemeral or self.account_data
+ self.timeline
+ or self.state
+ or self.ephemeral
+ or self.account_data
+ or self.sticky
# nb the notification count does not, er, count: if there's nothing
# else in the result, we don't need to send it.
)
@@ -601,6 +607,41 @@ async def ephemeral_by_room(
return now_token, ephemeral_by_room
+ async def sticky_events_by_room(
+ self,
+ sync_result_builder: "SyncResultBuilder",
+ now_token: StreamToken,
+ since_token: StreamToken | None = None,
+ ) -> tuple[StreamToken, dict[str, list[str]]]:
+ """Get the sticky events for each room the user is in
+ Args:
+ sync_result_builder
+ now_token: Where the server is currently up to.
+ since_token: Where the server was when the client last synced.
+ Returns:
+ A tuple of the now StreamToken, updated to reflect which sticky
+ events are included, and a dict mapping from room_id to a list
+ of sticky event IDs for that room (in sticky event stream order).
+ """
+ now = self.clock.time_msec()
+ with Measure(
+ self.clock, name="sticky_events_by_room", server_name=self.server_name
+ ):
+ from_id = since_token.sticky_events_key if since_token else 0
+
+ room_ids = sync_result_builder.joined_room_ids
+
+ to_id, sticky_by_room = await self.store.get_sticky_events_in_rooms(
+ room_ids,
+ from_id=from_id,
+ to_id=now_token.sticky_events_key,
+ now=now,
+ limit=StickyEvent.MAX_EVENTS_IN_SYNC,
+ )
+ now_token = now_token.copy_and_replace(StreamKeyType.STICKY_EVENTS, to_id)
+
+ return now_token, sticky_by_room
+
async def _load_filtered_recents(
self,
room_id: str,
@@ -2177,11 +2218,43 @@ async def _generate_sync_entry_for_rooms(
)
sync_result_builder.now_token = now_token
+ sticky_by_room: dict[str, list[str]] = {}
+ if self.hs_config.experimental.msc4354_enabled:
+ now_token, sticky_by_room = await self.sticky_events_by_room(
+ sync_result_builder, now_token, since_token
+ )
+ sync_result_builder.now_token = now_token
+
# 2. We check up front if anything has changed, if it hasn't then there is
# no point in going further.
+ #
+ # If this is an initial sync (no since_token), then of course we can't skip
+ # the sync entry, as we have no base to use as a comparison for the question
+ # 'has anything changed' (this is the client's first time 'seeing' anything).
+ #
+ # Otherwise, for incremental syncs, we consider skipping the sync entry,
+ # doing cheap checks first:
+ #
+ # - are there any per-room EDUs;
+ # - is there any Room Account Data; or
+ # - are there any sticky events in the rooms; or
+ # - might the rooms have changed
+ # (using in-memory event stream change caches, which can
+ # only answer either 'Not changed' or 'Possibly changed')
+ #
+ # If none of those cheap checks give us a reason to continue generating the sync entry,
+ # we finally query the database to check for changed room tags.
+ # If there are also no changed tags, we can short-circuit return an empty sync entry.
if not sync_result_builder.full_state:
- if since_token and not ephemeral_by_room and not account_data_by_room:
- have_changed = await self._have_rooms_changed(sync_result_builder)
+ # Cheap checks first
+ if (
+ since_token
+ and not ephemeral_by_room
+ and not account_data_by_room
+ and not sticky_by_room
+ ):
+ # This is also a cheap check, but we log the answer
+ have_changed = self._may_have_rooms_changed(sync_result_builder)
log_kv({"rooms_have_changed": have_changed})
if not have_changed:
tags_by_room = await self.store.get_updated_tags(
@@ -2225,6 +2298,7 @@ async def handle_room_entries(room_entry: "RoomSyncResultBuilder") -> None:
ephemeral=ephemeral_by_room.get(room_entry.room_id, []),
tags=tags_by_room.get(room_entry.room_id),
account_data=account_data_by_room.get(room_entry.room_id, {}),
+ sticky_event_ids=sticky_by_room.get(room_entry.room_id, []),
always_include=sync_result_builder.full_state,
)
logger.debug("Generated room entry for %s", room_entry.room_id)
@@ -2237,11 +2311,9 @@ async def handle_room_entries(room_entry: "RoomSyncResultBuilder") -> None:
return set(newly_joined_rooms), set(newly_left_rooms)
- async def _have_rooms_changed(
- self, sync_result_builder: "SyncResultBuilder"
- ) -> bool:
+ def _may_have_rooms_changed(self, sync_result_builder: "SyncResultBuilder") -> bool:
"""Returns whether there may be any new events that should be sent down
- the sync. Returns True if there are.
+ the sync. Returns True if there **may** be.
Does not modify the `sync_result_builder`.
"""
@@ -2611,6 +2683,7 @@ async def _generate_room_entry(
ephemeral: list[JsonDict],
tags: Mapping[str, JsonMapping] | None,
account_data: Mapping[str, JsonMapping],
+ sticky_event_ids: list[str],
always_include: bool = False,
) -> None:
"""Populates the `joined` and `archived` section of `sync_result_builder`
@@ -2640,6 +2713,8 @@ async def _generate_room_entry(
tags: List of *all* tags for room, or None if there has been
no change.
account_data: List of new account data for room
+ sticky_event_ids: MSC4354 sticky events in the room, if any.
+ In sticky event stream order.
always_include: Always include this room in the sync response,
even if empty.
"""
@@ -2650,7 +2725,13 @@ async def _generate_room_entry(
events = room_builder.events
# We want to shortcut out as early as possible.
- if not (always_include or account_data or ephemeral or full_state):
+ if not (
+ always_include
+ or account_data
+ or ephemeral
+ or full_state
+ or sticky_event_ids
+ ):
if events == [] and tags is None:
return
@@ -2742,6 +2823,7 @@ async def _generate_room_entry(
or account_data_events
or ephemeral
or full_state
+ or sticky_event_ids
):
return
@@ -2788,6 +2870,32 @@ async def _generate_room_entry(
if room_builder.rtype == "joined":
unread_notifications: dict[str, int] = {}
+ sticky_events: list[EventBase] = []
+ if sticky_event_ids:
+ # As per MSC4354:
+ # Remove sticky events that are already in the timeline, else we will needlessly duplicate
+ # events.
+ # There is no purpose in including sticky events in the sticky section if they're already in
+ # the timeline, as either way the client becomes aware of them.
+ # This is particularly important given the risk of sticky events spam since
+ # anyone can send sticky events, so halving the bandwidth on average for each sticky
+ # event is helpful.
+ timeline_event_id_set = {ev.event_id for ev in batch.events}
+ # Must preserve sticky event stream order
+ sticky_event_ids = [
+ e for e in sticky_event_ids if e not in timeline_event_id_set
+ ]
+ if sticky_event_ids:
+ # Fetch and filter the sticky events
+ sticky_events = await filter_and_transform_events_for_client(
+ self._storage_controllers,
+ sync_result_builder.sync_config.user.to_string(),
+ await self.store.get_events_as_list(sticky_event_ids),
+ # As per MSC4354:
+ # > History visibility checks MUST NOT be applied to sticky events.
+ # > Any joined user is authorised to see sticky events for the duration they remain sticky.
+ always_include_ids=frozenset(sticky_event_ids),
+ )
room_sync = JoinedSyncResult(
room_id=room_id,
timeline=batch,
@@ -2798,6 +2906,7 @@ async def _generate_room_entry(
unread_thread_notifications={},
summary=summary,
unread_count=0,
+ sticky=sticky_events,
)
if room_sync or always_include:
diff --git a/synapse/http/server.py b/synapse/http/server.py
index 226cb00831..2c235e04f4 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -861,7 +861,18 @@ def respond_with_json(
encoder = _encode_json_bytes
request.setHeader(b"Content-Type", b"application/json")
- request.setHeader(b"Cache-Control", b"no-cache, no-store, must-revalidate")
+ # Insert a default Cache-Control header if the servlet hasn't already set one. The
+ # default directive tells both the client and any intermediary cache to not cache
+ # the response, which is a sensible default to have on most API endpoints.
+ # The absence of a `Cache-Control` header would mean that it's up to the clients'
+ # and caching proxies' mood to cache things if they want. This can be dangerous, which is
+ # why we explicitly set a "don't cache by default" policy.
+ # In practice, `no-store` should be enough, but having all three directives is more
+ # conservative in case we encounter weird, non-spec compliant caches.
+ # See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control#directives
+ # for more details.
+ if not request.responseHeaders.hasHeader(b"Cache-Control"):
+ request.setHeader(b"Cache-Control", b"no-cache, no-store, must-revalidate")
if send_cors:
set_cors_headers(request)
@@ -901,7 +912,18 @@ def respond_with_json_bytes(
request.setHeader(b"Content-Type", b"application/json")
request.setHeader(b"Content-Length", b"%d" % (len(json_bytes),))
- request.setHeader(b"Cache-Control", b"no-cache, no-store, must-revalidate")
+ # Insert a default Cache-Control header if the servlet hasn't already set one. The
+ # default directive tells both the client and any intermediary cache to not cache
+ # the response, which is a sensible default to have on most API endpoints.
+ # The absence of a `Cache-Control` header would mean that it's up to the clients'
+ # and caching proxies' mood to cache things if they want. This can be dangerous, which is
+ # why we explicitly set a "don't cache by default" policy.
+ # In practice, `no-store` should be enough, but having all three directives is more
+ # conservative in case we encounter weird, non-spec compliant caches.
+ # See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control#directives
+ # for more details.
+ if not request.responseHeaders.hasHeader(b"Cache-Control"):
+ request.setHeader(b"Cache-Control", b"no-cache, no-store, must-revalidate")
if send_cors:
set_cors_headers(request)
diff --git a/synapse/http/site.py b/synapse/http/site.py
index 6ced5b98b3..9b7fd5c936 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -638,10 +638,12 @@ def _finished_processing(self) -> None:
if authenticated_entity:
requester = f"{authenticated_entity}|{requester}"
+ # Updates to this log line should also be reflected in our docs,
+ # `docs/usage/administration/request_log.md`
self.synapse_site.access_logger.log(
log_level,
"%s - %s - {%s}"
- " Processed request: %.3fsec/%.3fsec (%.3fsec, %.3fsec) (%.3fsec/%.3fsec/%d)"
+ " Processed request: %.3fsec/%.3fsec ru=(%.3fsec, %.3fsec) db=(%.3fsec/%.3fsec/%d)"
' %sB %s "%s %s %s" "%s" [%d dbevts]',
self.get_client_ip_if_available(),
self.synapse_site.site_tag,
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index 5f07849bd8..7b8578eae9 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -161,6 +161,7 @@
from synapse.util.clock import Clock
from synapse.util.duration import Duration
from synapse.util.frozenutils import freeze
+from synapse.util.sentinel import Sentinel
if TYPE_CHECKING:
# Old versions don't have `LiteralString`
@@ -1992,27 +1993,47 @@ async def set_displayname(
self,
user_id: UserID,
new_displayname: str,
- deactivation: bool = False,
+ deactivation: bool | Sentinel = Sentinel.UNSET_SENTINEL,
) -> None:
"""Sets a user's display name.
Added in Synapse v1.76.0.
+ (Synapse Developer note: All future arguments should be kwargs-only
+ due to https://github.com/element-hq/synapse/issues/19546)
+
Args:
user_id:
The user whose display name is to be changed.
new_displayname:
The new display name to give the user.
deactivation:
+ **deprecated since v1.150.0**
+ Callers should NOT pass this argument. Instead, omit it and leave it to the default.
+ Will log an error if it is passed.
+ Remove after 2027-01-01
+ Tracked by https://github.com/element-hq/synapse/issues/19546
+
Whether this change was made while deactivating the user.
+
+ Should be omitted; a logged error is produced whenever it is explicitly passed.
+ It's likely that this flag should have stayed internal-only and
+ was accidentally exposed to the Module API.
+ It no longer has any function.
"""
requester = create_requester(user_id)
+
+ if deactivation is not Sentinel.UNSET_SENTINEL:
+ logger.error(
+ "Deprecated `deactivation` parameter passed to `set_displayname` Module API (value: %r). This will break in 2027.",
+ deactivation,
+ )
+
await self._hs.get_profile_handler().set_displayname(
target_user=user_id,
requester=requester,
new_displayname=new_displayname,
by_admin=True,
- deactivation=deactivation,
)
def get_current_time_msec(self) -> int:
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 807a9cad5b..8265c2d789 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -369,7 +369,7 @@ async def on_PUT(
if user: # modify user
if "displayname" in body:
await self.profile_handler.set_displayname(
- target_user, requester, body["displayname"], True
+ target_user, requester, body["displayname"], by_admin=True
)
if threepids is not None:
@@ -418,7 +418,7 @@ async def on_PUT(
if "avatar_url" in body:
await self.profile_handler.set_avatar_url(
- target_user, requester, body["avatar_url"], True
+ target_user, requester, body["avatar_url"], by_admin=True
)
if "admin" in body:
@@ -526,7 +526,7 @@ async def on_PUT(
if "avatar_url" in body and isinstance(body["avatar_url"], str):
await self.profile_handler.set_avatar_url(
- target_user, requester, body["avatar_url"], True
+ target_user, requester, body["avatar_url"], by_admin=True
)
user_info_dict = await self.admin_handler.get_user(target_user)
diff --git a/synapse/rest/client/auth_metadata.py b/synapse/rest/client/auth_metadata.py
index 702f550906..062b8ed13e 100644
--- a/synapse/rest/client/auth_metadata.py
+++ b/synapse/rest/client/auth_metadata.py
@@ -49,6 +49,25 @@ def __init__(self, hs: "HomeServer"):
self._auth = hs.get_auth()
async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]:
+ # This endpoint is unauthenticated and the response only depends on
+ # the metadata we get from Matrix Authentication Service. Internally,
+ # MasDelegatedAuth/MSC3861DelegatedAuth.issuer() are already caching the
+ # response in memory anyway. Ideally we would follow any Cache-Control directive
+ # given by MAS, but this is fine for now.
+ #
+ # - `public` means it can be cached both in the browser and in caching proxies
+ # - `max-age` controls how long we cache on the browser side. 10m is sane enough
+ # - `s-maxage` controls how long we cache on the proxy side. Since caching
+ # proxies usually have a way to purge caches, it is fine to cache there for
+ # longer (1h), and issue cache invalidations in case we need it
+ # - `stale-while-revalidate` allows caching proxies to serve stale content while
+ # revalidating in the background. This is useful for making this request always
+ # 'snappy' to end users whilst still keeping it fresh
+ request.setHeader(
+ b"Cache-Control",
+ b"public, max-age=600, s-maxage=3600, stale-while-revalidate=600",
+ )
+
if self._config.mas.enabled:
assert isinstance(self._auth, MasDelegatedAuth)
return 200, {"issuer": await self._auth.issuer()}
@@ -94,6 +113,25 @@ def __init__(self, hs: "HomeServer"):
self._auth = hs.get_auth()
async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]:
+ # This endpoint is unauthenticated and the response only depends on
+ # the metadata we get from Matrix Authentication Service. Internally,
+ # MasDelegatedAuth/MSC3861DelegatedAuth.issuer() are already caching the
+ # response in memory anyway. Ideally we would follow any Cache-Control directive
+ # given by MAS, but this is fine for now.
+ #
+ # - `public` means it can be cached both in the browser and in caching proxies
+ # - `max-age` controls how long we cache on the browser side. 10m is sane enough
+ # - `s-maxage` controls how long we cache on the proxy side. Since caching
+ # proxies usually have a way to purge caches, it is fine to cache there for
+ # longer (1h), and issue cache invalidations in case we need it
+ # - `stale-while-revalidate` allows caching proxies to serve stale content while
+ # revalidating in the background. This is useful for making this request always
+ # 'snappy' to end users whilst still keeping it fresh
+ request.setHeader(
+ b"Cache-Control",
+ b"public, max-age=600, s-maxage=3600, stale-while-revalidate=600",
+ )
+
if self._config.mas.enabled:
assert isinstance(self._auth, MasDelegatedAuth)
return 200, await self._auth.auth_metadata()
diff --git a/synapse/rest/client/profile.py b/synapse/rest/client/profile.py
index 7f3128cb61..c2ec5b3611 100644
--- a/synapse/rest/client/profile.py
+++ b/synapse/rest/client/profile.py
@@ -206,15 +206,15 @@ async def on_PUT(
if field_name == ProfileFields.DISPLAYNAME:
await self.profile_handler.set_displayname(
- user, requester, new_value, is_admin, propagate=propagate
+ user, requester, new_value, by_admin=is_admin, propagate=propagate
)
elif field_name == ProfileFields.AVATAR_URL:
await self.profile_handler.set_avatar_url(
- user, requester, new_value, is_admin, propagate=propagate
+ user, requester, new_value, by_admin=is_admin, propagate=propagate
)
else:
await self.profile_handler.set_profile_field(
- user, requester, field_name, new_value, is_admin
+ user, requester, field_name, new_value, by_admin=is_admin
)
return 200, {}
@@ -263,15 +263,15 @@ async def on_DELETE(
if field_name == ProfileFields.DISPLAYNAME:
await self.profile_handler.set_displayname(
- user, requester, "", is_admin, propagate=propagate
+ user, requester, "", by_admin=is_admin, propagate=propagate
)
elif field_name == ProfileFields.AVATAR_URL:
await self.profile_handler.set_avatar_url(
- user, requester, "", is_admin, propagate=propagate
+ user, requester, "", by_admin=is_admin, propagate=propagate
)
else:
await self.profile_handler.delete_profile_field(
- user, requester, field_name, is_admin
+ user, requester, field_name, by_admin=is_admin
)
return 200, {}
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py
index 91f2f16771..710d097eab 100644
--- a/synapse/rest/client/sync.py
+++ b/synapse/rest/client/sync.py
@@ -619,6 +619,14 @@ async def encode_room(
ephemeral_events = room.ephemeral
result["ephemeral"] = {"events": ephemeral_events}
result["unread_notifications"] = room.unread_notifications
+ if room.sticky:
+ # The sticky events have already been deduplicated so that events
+ # appearing in the timeline won't appear again here
+ result["msc4354_sticky"] = {
+ "events": await self._event_serializer.serialize_events(
+ room.sticky, time_now, config=serialize_options
+ )
+ }
if room.unread_thread_notifications:
result["unread_thread_notifications"] = room.unread_thread_notifications
if self._msc3773_enabled:
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py
index 23f5ffeedb..f8d7a1a4d9 100644
--- a/synapse/rest/client/versions.py
+++ b/synapse/rest/client/versions.py
@@ -81,6 +81,26 @@ async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]:
msc3575_enabled = await self.store.is_feature_enabled(
user_id, ExperimentalFeature.MSC3575
)
+ else:
+ # Allow caching of unauthenticated responses, as they only depend
+ # on server configuration which rarely changes.
+ #
+ # - `public` means it can be cached both in the browser and in caching proxies
+ # - `max-age` controls how long we cache on the browser side. 10m is sane enough
+ # - `s-maxage` controls how long we cache on the proxy side. Since caching
+ # proxies usually have a way to purge caches, it is fine to cache there for
+ # longer (1h), and issue cache invalidations in case we need it
+ # - `stale-while-revalidate` allows caching proxies to serve stale content while
+ # revalidating in the background. This is useful for making this request always
+ # 'snappy' to end users whilst still keeping it fresh
+ request.setHeader(
+ b"Cache-Control",
+ b"public, max-age=600, s-maxage=3600, stale-while-revalidate=600",
+ )
+
+ # Tell caches to vary on the Authorization header, so that
+ # authenticated responses are not served from cache.
+ request.setHeader(b"Vary", b"Authorization")
return (
200,
diff --git a/synapse/rest/synapse/mas/users.py b/synapse/rest/synapse/mas/users.py
index 55c7337555..01db41bcfa 100644
--- a/synapse/rest/synapse/mas/users.py
+++ b/synapse/rest/synapse/mas/users.py
@@ -112,6 +112,18 @@ class PostBody(RequestBodyModel):
unset_emails: StrictBool = False
set_emails: list[StrictStr] | None = None
+ locked: StrictBool | None = None
+ """
+ True to lock user. False to unlock. None to leave the same.
+
+ This is mostly for informational purposes; if the user's account is locked in MAS
+ but not in Synapse, the token introspection response will prevent them from using
+ their account.
+
+ However, having a local copy of the locked state in Synapse is useful for excluding
+ the user from the user directory.
+ """
+
@model_validator(mode="before")
@classmethod
def validate_exclusive(cls, values: Any) -> Any:
@@ -206,6 +218,9 @@ async def _async_render_POST(
validated_at=current_time,
)
+ if body.locked is not None:
+ await self.store.set_user_locked_status(user_id.to_string(), body.locked)
+
if body.unset_avatar_url:
await self.profile_handler.set_avatar_url(
target_user=user_id,
diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py
index 11ad516eb3..9b787e19a3 100644
--- a/synapse/storage/databases/main/profile.py
+++ b/synapse/storage/databases/main/profile.py
@@ -534,6 +534,18 @@ def delete_profile_field(txn: LoggingTransaction) -> None:
await self.db_pool.runInteraction("delete_profile_field", delete_profile_field)
+ async def delete_profile(self, user_id: UserID) -> None:
+ """
+ Deletes an entire user profile, including displayname, avatar_url and all custom fields.
+ Used at user deactivation when erasure is requested.
+ """
+
+ await self.db_pool.simple_delete(
+ desc="delete_profile",
+ table="profiles",
+ keyvalues={"full_user_id": user_id.to_string()},
+ )
+
class ProfileStore(ProfileWorkerStore):
pass
diff --git a/synapse/storage/databases/main/sticky_events.py b/synapse/storage/databases/main/sticky_events.py
index 101306296e..38b84443df 100644
--- a/synapse/storage/databases/main/sticky_events.py
+++ b/synapse/storage/databases/main/sticky_events.py
@@ -13,9 +13,7 @@
import logging
import random
from dataclasses import dataclass
-from typing import (
- TYPE_CHECKING,
-)
+from typing import TYPE_CHECKING, Collection, cast
from twisted.internet.defer import Deferred
@@ -25,6 +23,7 @@
DatabasePool,
LoggingDatabaseConnection,
LoggingTransaction,
+ make_in_list_sql_clause,
)
from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
from synapse.storage.databases.main.state import StateGroupWorkerStore
@@ -138,6 +137,98 @@ def get_max_sticky_events_stream_id(self) -> int:
def get_sticky_events_stream_id_generator(self) -> MultiWriterIdGenerator:
return self._sticky_events_id_gen
+ async def get_sticky_events_in_rooms(
+ self,
+ room_ids: Collection[str],
+ *,
+ from_id: int,
+ to_id: int,
+ now: int,
+ limit: int | None,
+ ) -> tuple[int, dict[str, list[str]]]:
+ """
+ Fetch all the sticky events' IDs in the given rooms, with sticky stream IDs satisfying
+ from_id < sticky stream ID <= to_id.
+
+ The events are returned ordered by the sticky events stream.
+
+ Args:
+ room_ids: The room IDs to return sticky events in.
+ from_id: The sticky stream ID that sticky events should be returned from (exclusive).
+ to_id: The sticky stream ID that sticky events should end at (inclusive).
+ now: The current time in unix millis, used for skipping expired events.
+ limit: Max sticky events to return, or None to apply no limit.
+ Returns:
+ to_id, dict[room_id, list[event_ids]]
+ """
+ sticky_events_rows = await self.db_pool.runInteraction(
+ "get_sticky_events_in_rooms",
+ self._get_sticky_events_in_rooms_txn,
+ room_ids,
+ from_id=from_id,
+ to_id=to_id,
+ now=now,
+ limit=limit,
+ )
+
+ if not sticky_events_rows:
+ return to_id, {}
+
+ # Get stream_id of the last row, which is the highest
+ new_to_id, _, _ = sticky_events_rows[-1]
+
+ # room ID -> event IDs
+ room_id_to_event_ids: dict[str, list[str]] = {}
+ for _, room_id, event_id in sticky_events_rows:
+ events = room_id_to_event_ids.setdefault(room_id, [])
+ events.append(event_id)
+
+ return (new_to_id, room_id_to_event_ids)
+
+ def _get_sticky_events_in_rooms_txn(
+ self,
+ txn: LoggingTransaction,
+ room_ids: Collection[str],
+ *,
+ from_id: int,
+ to_id: int,
+ now: int,
+ limit: int | None,
+ ) -> list[tuple[int, str, str]]:
+ if len(room_ids) == 0:
+ return []
+ room_id_in_list_clause, room_id_in_list_values = make_in_list_sql_clause(
+ txn.database_engine, "se.room_id", room_ids
+ )
+ limit_clause = ""
+ limit_params: tuple[int, ...] = ()
+ if limit is not None:
+ limit_clause = "LIMIT ?"
+ limit_params = (limit,)
+
+ if isinstance(self.database_engine, PostgresEngine):
+ expr_soft_failed = "COALESCE(((ej.internal_metadata::jsonb)->>'soft_failed')::boolean, FALSE)"
+ else:
+ expr_soft_failed = "COALESCE(ej.internal_metadata->>'soft_failed', FALSE)"
+
+ txn.execute(
+ f"""
+ SELECT se.stream_id, se.room_id, event_id
+ FROM sticky_events se
+ INNER JOIN event_json ej USING (event_id)
+ WHERE
+ NOT {expr_soft_failed}
+ AND ? < expires_at
+ AND ? < stream_id
+ AND stream_id <= ?
+ AND {room_id_in_list_clause}
+ ORDER BY stream_id ASC
+ {limit_clause}
+ """,
+ (now, from_id, to_id, *room_id_in_list_values, *limit_params),
+ )
+ return cast(list[tuple[int, str, str]], txn.fetchall())
+
async def get_updated_sticky_events(
self, *, from_id: int, to_id: int, limit: int
) -> list[StickyEventUpdate]:
diff --git a/synapse/synapse_rust/events.pyi b/synapse/synapse_rust/events.pyi
index 03ad8dc5ef..eac5699a77 100644
--- a/synapse/synapse_rust/events.pyi
+++ b/synapse/synapse_rust/events.pyi
@@ -38,6 +38,8 @@ class EventInternalMetadata:
txn_id: str
"""The transaction ID, if it was set when the event was created."""
+ delay_id: str
+ """The delay ID, set only if the event was a delayed event."""
token_id: int
"""The access token ID of the user who sent this event, if any."""
device_id: str
diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py
index fbd01914d5..f2898de0b8 100644
--- a/synapse/util/__init__.py
+++ b/synapse/util/__init__.py
@@ -21,6 +21,7 @@
import collections.abc
import logging
+import os
import typing
from typing import (
Iterator,
@@ -74,9 +75,15 @@ def log_failure(
return None
-# Version string with git info. Computed here once so that we don't invoke git multiple
-# times.
-SYNAPSE_VERSION = get_distribution_version_string("matrix-synapse", __file__)
+SYNAPSE_VERSION = os.getenv(
+ "SYNAPSE_VERSION_STRING"
+) or get_distribution_version_string("matrix-synapse", __file__)
+"""
+Version string with git info.
+
+This can be overridden via the `SYNAPSE_VERSION_STRING` environment variable or is
+computed here once so that we don't invoke git multiple times.
+"""
class ExceptionBundle(Exception):
diff --git a/synapse/util/task_scheduler.py b/synapse/util/task_scheduler.py
index e5cfc85a37..c1790fd3ae 100644
--- a/synapse/util/task_scheduler.py
+++ b/synapse/util/task_scheduler.py
@@ -471,8 +471,12 @@ async def wrapper() -> None:
log_context,
start_time,
)
+ result = None
+ error = None
try:
(status, result, error) = await function(task)
+ except defer.CancelledError:
+ status = TaskStatus.CANCELLED
except Exception:
f = Failure()
logger.error(
@@ -481,7 +485,6 @@ async def wrapper() -> None:
exc_info=(f.type, f.value, f.getTracebackObject()),
)
status = TaskStatus.FAILED
- result = None
error = f.getErrorMessage()
await self._store.update_scheduled_task(
diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py
index 84d961e2dc..cb24566f39 100644
--- a/tests/federation/test_federation_server.py
+++ b/tests/federation/test_federation_server.py
@@ -324,6 +324,141 @@ def test_needs_to_be_in_room(self) -> None:
self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
+class UnstableGetExtremitiesTests(unittest.FederatingHomeserverTestCase):
+ servlets = [
+ admin.register_servlets,
+ room.register_servlets,
+ login.register_servlets,
+ ]
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ super().prepare(reactor, clock, hs)
+ self._storage_controllers = hs.get_storage_controllers()
+
+ def _make_endpoint_path(self, room_id: str) -> str:
+ return f"/_matrix/federation/unstable/org.matrix.msc4370/extremities/{room_id}"
+
+ def _remote_join(self, room_id: str, room_version: str) -> str:
+ # Note: other tests ensure the called endpoints in this function return useful
+ # and proper data.
+
+ # make_join first
+ joining_user = "@misspiggy:" + self.OTHER_SERVER_NAME
+ channel = self.make_signed_federation_request(
+ "GET",
+ f"/_matrix/federation/v1/make_join/{room_id}/{joining_user}?ver={room_version}",
+ )
+ self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body)
+ join_result = channel.json_body
+
+ # Sign/populate the join
+ join_event_dict = join_result["event"]
+ self.add_hashes_and_signatures_from_other_server(
+ join_event_dict,
+ KNOWN_ROOM_VERSIONS[room_version],
+ )
+ if room_version in ["1", "2"]:
+ add_hashes_and_signatures(
+ KNOWN_ROOM_VERSIONS[room_version],
+ join_event_dict,
+ signature_name=self.hs.hostname,
+ signing_key=self.hs.signing_key,
+ )
+
+ # Send the join
+ channel = self.make_signed_federation_request(
+ "PUT",
+ f"/_matrix/federation/v2/send_join/{room_id}/x",
+ content=join_event_dict,
+ )
+
+ # Check that things went okay so the test doesn't become a total train wreck
+ self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body)
+ r = self.get_success(self._storage_controllers.state.get_current_state(room_id))
+ self.assertEqual(r[("m.room.member", joining_user)].membership, "join")
+
+ return r[("m.room.member", joining_user)].event_id
+
+ def _test_get_extremities_common(self, room_version: str) -> None:
+ # Create a room to test with
+ creator_user_id = self.register_user("kermit", "test")
+ tok = self.login("kermit", "test")
+ room_id = self.helper.create_room_as(
+ room_creator=creator_user_id,
+ tok=tok,
+ room_version=room_version,
+ extra_content={
+ # Public preset uses `shared` history visibility, but makes joins
+ # easier in our tests.
+ # https://spec.matrix.org/v1.16/client-server-api/#post_matrixclientv3createroom
+ "preset": "public_chat"
+ },
+ )
+
+ # At this stage we should fail to get the extremities because we're not joined
+ # and therefore can't see the events (`shared` history visibility).
+ channel = self.make_signed_federation_request(
+ "GET", self._make_endpoint_path(room_id)
+ )
+ self.assertEqual(channel.code, HTTPStatus.FORBIDDEN, channel.json_body)
+ self.assertEqual(channel.json_body["error"], "Host not in room.")
+ self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
+
+ # Now join the room and try again
+ # Note: there should be just one extremity: the join
+ join_event_id = self._remote_join(room_id, room_version)
+ channel = self.make_signed_federation_request(
+ "GET", self._make_endpoint_path(room_id)
+ )
+ self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body)
+ self.assertEqual(channel.json_body["prev_events"], [join_event_id])
+
+ # ACL the calling server and try again. This should cause an error getting extremities.
+ self.helper.send_state(
+ room_id,
+ "m.room.server_acl",
+ {
+ "allow": ["*"],
+ "allow_ip_literals": False,
+ "deny": [self.OTHER_SERVER_NAME],
+ },
+ tok=tok,
+ expect_code=HTTPStatus.OK,
+ )
+ channel = self.make_signed_federation_request(
+ "GET", self._make_endpoint_path(room_id)
+ )
+ self.assertEqual(channel.code, HTTPStatus.FORBIDDEN, channel.json_body)
+ self.assertEqual(channel.json_body["error"], "Server is banned from room")
+ self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")
+
+ @parameterized.expand([(k,) for k in KNOWN_ROOM_VERSIONS.keys()])
+ @override_config(
+ {"use_frozen_dicts": True, "experimental_features": {"msc4370_enabled": True}}
+ )
+ def test_get_extremities_with_frozen_dicts(self, room_version: str) -> None:
+ """Test GET /extremities with USE_FROZEN_DICTS=True"""
+ self._test_get_extremities_common(room_version)
+
+ @parameterized.expand([(k,) for k in KNOWN_ROOM_VERSIONS.keys()])
+ @override_config(
+ {"use_frozen_dicts": False, "experimental_features": {"msc4370_enabled": True}}
+ )
+ def test_get_extremities_without_frozen_dicts(self, room_version: str) -> None:
+ """Test GET /extremities with USE_FROZEN_DICTS=False"""
+ self._test_get_extremities_common(room_version)
+
+ # note the lack of config-setting stuff on this test.
+ def test_get_extremities_unstable_not_enabled(self) -> None:
+ """Test that GET /extremities returns M_UNRECOGNIZED when MSC4370 is not enabled"""
+ # We shouldn't even have to create a room - the endpoint should just fail.
+ channel = self.make_signed_federation_request(
+ "GET", self._make_endpoint_path("!room:example.org")
+ )
+ self.assertEqual(channel.code, HTTPStatus.NOT_FOUND, channel.json_body)
+ self.assertEqual(channel.json_body["errcode"], "M_UNRECOGNIZED")
+
+
class SendJoinFederationTests(unittest.FederatingHomeserverTestCase):
servlets = [
admin.register_servlets,
diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py
index 753060b583..a643c5116e 100644
--- a/tests/handlers/test_profile.py
+++ b/tests/handlers/test_profile.py
@@ -19,18 +19,23 @@
#
#
from typing import Any, Awaitable, Callable
-from unittest.mock import AsyncMock, Mock
+from unittest.mock import AsyncMock, Mock, patch
from parameterized import parameterized
from twisted.internet.testing import MemoryReactor
import synapse.types
+from synapse.api.constants import EventTypes
from synapse.api.errors import AuthError, SynapseError
from synapse.rest import admin
+from synapse.rest.client import login, room
from synapse.server import HomeServer
from synapse.types import JsonDict, UserID
+from synapse.types.state import StateFilter
from synapse.util.clock import Clock
+from synapse.util.duration import Duration
+from synapse.util.task_scheduler import TaskStatus
from tests import unittest
from tests.unittest import override_config
@@ -39,7 +44,11 @@
class ProfileTestCase(unittest.HomeserverTestCase):
"""Tests profile management."""
- servlets = [admin.register_servlets]
+ servlets = [
+ admin.register_servlets,
+ login.register_servlets,
+ room.register_servlets,
+ ]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
self.mock_federation = AsyncMock()
@@ -63,12 +72,15 @@ def register_query_handler(
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
+ self.storage_controllers = self.hs.get_storage_controllers()
+ self.task_scheduler = hs.get_task_scheduler()
self.frank = UserID.from_string("@1234abcd:test")
self.bob = UserID.from_string("@4567:test")
self.alice = UserID.from_string("@alice:remote")
self.register_user(self.frank.localpart, "frankpassword")
+ self.frank_token = self.login(self.frank.localpart, "frankpassword")
self.handler = hs.get_profile_handler()
@@ -114,6 +126,195 @@ def test_set_my_name(self) -> None:
self.get_success(self.store.get_profile_displayname(self.frank))
)
+ def test_update_room_membership_on_set_displayname(self) -> None:
+ """Test that `set_displayname` updates membership events in rooms."""
+
+ self.get_success(
+ self.handler.set_displayname(
+ self.frank, synapse.types.create_requester(self.frank), "Frank"
+ )
+ )
+
+ room_id = self.helper.create_room_as(
+ self.frank.to_string(), tok=self.frank_token
+ )
+
+ state_tuple = (EventTypes.Member, self.frank.to_string())
+
+ membership = self.get_success(
+ self.storage_controllers.state.get_current_state(
+ room_id, StateFilter.from_types([state_tuple])
+ )
+ )
+ self.assertEqual(membership[state_tuple].content["displayname"], "Frank")
+
+ self.get_success(
+ self.handler.set_displayname(
+ self.frank, synapse.types.create_requester(self.frank), "Frank Jr."
+ )
+ )
+
+ membership = self.get_success(
+ self.storage_controllers.state.get_current_state(
+ room_id, StateFilter.from_types([state_tuple])
+ )
+ )
+ self.assertEqual(membership[state_tuple].content["displayname"], "Frank Jr.")
+
+ def test_background_update_room_membership_on_set_displayname(self) -> None:
+ """Test that `set_displayname` returns immediately and that room membership updates are still done in background."""
+
+ self.get_success(
+ self.handler.set_displayname(
+ self.frank, synapse.types.create_requester(self.frank), "Frank"
+ )
+ )
+
+ room_id = self.helper.create_room_as(
+ self.frank.to_string(), tok=self.frank_token
+ )
+
+ original_update_membership = self.hs.get_room_member_handler().update_membership
+
+ async def slow_update_membership(*args: Any, **kwargs: Any) -> tuple[str, int]:
+ await self.clock.sleep(Duration(milliseconds=10))
+ return await original_update_membership(*args, **kwargs)
+
+ with patch.object(
+ self.hs.get_room_member_handler(),
+ "update_membership",
+ side_effect=slow_update_membership,
+ ):
+ state_tuple = (EventTypes.Member, self.frank.to_string())
+ self.get_success(
+ self.handler.set_displayname(
+ self.frank, synapse.types.create_requester(self.frank), "Frank Jr."
+ )
+ )
+
+ membership = self.get_success(
+ self.storage_controllers.state.get_current_state(
+ room_id, StateFilter.from_types([state_tuple])
+ )
+ )
+ self.assertEqual(membership[state_tuple].content["displayname"], "Frank")
+
+ # Let's be sure we are over the delay introduced by slow_update_membership
+ self.get_success(self.clock.sleep(Duration(milliseconds=20)), by=1)
+
+ membership = self.get_success(
+ self.storage_controllers.state.get_current_state(
+ room_id, StateFilter.from_types([state_tuple])
+ )
+ )
+ self.assertEqual(
+ membership[state_tuple].content["displayname"], "Frank Jr."
+ )
+
+ def test_background_update_room_membership_resume_after_restart(self) -> None:
+ """Test that room membership updates triggered by changing the avatar or the display name are resumed after a restart."""
+
+ self.get_success(
+ self.handler.set_displayname(
+ self.frank, synapse.types.create_requester(self.frank), "Frank"
+ )
+ )
+
+ room_id_1 = self.helper.create_room_as(
+ self.frank.to_string(), tok=self.frank_token
+ )
+
+ room_id_2 = self.helper.create_room_as(
+ self.frank.to_string(), tok=self.frank_token
+ )
+
+ # Ensure `room_id_1` comes before `room_id_2` alphabetically
+ if room_id_1 > room_id_2:
+ room_id_1, room_id_2 = room_id_2, room_id_1
+
+ original_update_membership = self.hs.get_room_member_handler().update_membership
+
+ room_1_updated = False
+
+ async def potentially_slow_update_membership(
+ *args: Any, **kwargs: Any
+ ) -> tuple[str, int]:
+ if args[2] == room_id_2:
+ await self.clock.sleep(Duration(milliseconds=10))
+ if args[2] == room_id_1:
+ nonlocal room_1_updated
+ room_1_updated = True
+ return await original_update_membership(*args, **kwargs)
+
+ with patch.object(
+ self.hs.get_room_member_handler(),
+ "update_membership",
+ side_effect=potentially_slow_update_membership,
+ ):
+ state_tuple = (EventTypes.Member, self.frank.to_string())
+ self.get_success(
+ self.handler.set_displayname(
+ self.frank, synapse.types.create_requester(self.frank), "Frank Jr."
+ )
+ )
+
+ # Check that the displayname is updated immediately for the first room
+ membership = self.get_success(
+ self.storage_controllers.state.get_current_state(
+ room_id_1, StateFilter.from_types([state_tuple])
+ )
+ )
+ self.assertEqual(
+ membership[state_tuple].content["displayname"], "Frank Jr."
+ )
+
+ # Simulate a synapse restart by emptying the list of running tasks
+ # and canceling the deferred
+ _, deferred = self.task_scheduler._running_tasks.popitem()
+ deferred.cancel()
+
+ # Let's reset the flag to track whether room 1 was updated after the restart
+ room_1_updated = False
+
+ # Let's be sure we are over the delay introduced by slow_update_membership
+ # and that the task was not executed as expected
+ self.get_success(self.clock.sleep(Duration(milliseconds=20)), by=1)
+
+ membership = self.get_success(
+ self.storage_controllers.state.get_current_state(
+ room_id_2, StateFilter.from_types([state_tuple])
+ )
+ )
+ self.assertEqual(membership[state_tuple].content["displayname"], "Frank")
+
+ cancelled_task = self.get_success(
+ self.task_scheduler.get_tasks(
+ actions=["update_join_states"], statuses=[TaskStatus.CANCELLED]
+ )
+ )[0]
+
+ self.get_success(
+ self.task_scheduler.update_task(
+ cancelled_task.id, status=TaskStatus.ACTIVE
+ )
+ )
+
+ # Let's be sure we are over the delay introduced by slow_update_membership
+ self.get_success(self.clock.sleep(Duration(milliseconds=20)), by=1)
+
+ # Updates should have been resumed from room 2 after the restart
+ # so room 1 should not have been updated this time
+ self.assertFalse(room_1_updated)
+
+ membership = self.get_success(
+ self.storage_controllers.state.get_current_state(
+ room_id_2, StateFilter.from_types([state_tuple])
+ )
+ )
+ self.assertEqual(
+ membership[state_tuple].content["displayname"], "Frank Jr."
+ )
+
@override_config({"enable_set_displayname": False})
def test_set_displayname_if_disabled(self) -> None:
# Setting displayname for the first time is allowed
diff --git a/tests/rest/client/test_delayed_events.py b/tests/rest/client/test_delayed_events.py
index efa69a393a..da904ce1f5 100644
--- a/tests/rest/client/test_delayed_events.py
+++ b/tests/rest/client/test_delayed_events.py
@@ -22,7 +22,7 @@
from synapse.api.errors import Codes
from synapse.rest import admin
-from synapse.rest.client import delayed_events, login, room, versions
+from synapse.rest.client import delayed_events, login, room, sync, versions
from synapse.server import HomeServer
from synapse.types import JsonDict
from synapse.util.clock import Clock
@@ -59,6 +59,7 @@ class DelayedEventsTestCase(HomeserverTestCase):
delayed_events.register_servlets,
login.register_servlets,
room.register_servlets,
+ sync.register_servlets,
]
def default_config(self) -> JsonDict:
@@ -106,6 +107,9 @@ def test_delayed_state_events_are_sent_on_timeout(self) -> None:
self.user1_access_token,
)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
+ delay_id = channel.json_body.get("delay_id")
+ assert delay_id is not None
+
events = self._get_delayed_events()
self.assertEqual(1, len(events), events)
content = self._get_delayed_event_content(events[0])
@@ -128,6 +132,56 @@ def test_delayed_state_events_are_sent_on_timeout(self) -> None:
)
self.assertEqual(setter_expected, content.get(setter_key), content)
+ self._find_sent_delayed_event(self.user1_access_token, delay_id, True)
+ self._find_sent_delayed_event(self.user2_access_token, delay_id, False)
+
+ def test_delayed_member_events_are_sent_on_timeout(self) -> None:
+ channel = self.make_request(
+ "PUT",
+ _get_path_for_delayed_state(
+ self.room_id,
+ "m.room.member",
+ self.user2_user_id,
+ 900,
+ ),
+ {
+ "membership": "leave",
+ "reason": "Delayed kick",
+ },
+ self.user1_access_token,
+ )
+ self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
+ delay_id = channel.json_body.get("delay_id")
+ assert delay_id is not None
+
+ events = self._get_delayed_events()
+ self.assertEqual(1, len(events), events)
+ content = self._get_delayed_event_content(events[0])
+ self.assertEqual("leave", content.get("membership"), content)
+ self.assertEqual("Delayed kick", content.get("reason"), content)
+
+ content = self.helper.get_state(
+ self.room_id,
+ "m.room.member",
+ self.user1_access_token,
+ state_key=self.user2_user_id,
+ )
+ self.assertEqual("join", content.get("membership"), content)
+
+ self.reactor.advance(1)
+ self.assertListEqual([], self._get_delayed_events())
+ content = self.helper.get_state(
+ self.room_id,
+ "m.room.member",
+ self.user1_access_token,
+ state_key=self.user2_user_id,
+ )
+ self.assertEqual("leave", content.get("membership"), content)
+ self.assertEqual("Delayed kick", content.get("reason"), content)
+
+ self._find_sent_delayed_event(self.user1_access_token, delay_id, True)
+ self._find_sent_delayed_event(self.user2_access_token, delay_id, False)
+
def test_get_delayed_events_auth(self) -> None:
channel = self.make_request("GET", PATH_PREFIX)
self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, channel.result)
@@ -254,6 +308,9 @@ def test_cancel_delayed_state_event(self, action_in_path: bool) -> None:
expect_code=HTTPStatus.NOT_FOUND,
)
+ self._find_sent_delayed_event(self.user1_access_token, delay_id, False)
+ self._find_sent_delayed_event(self.user2_access_token, delay_id, False)
+
@parameterized.expand((True, False))
@unittest.override_config(
{"rc_delayed_event_mgmt": {"per_second": 0.5, "burst_count": 1}}
@@ -327,6 +384,9 @@ def test_send_delayed_state_event(
)
self.assertEqual(content_value, content.get(content_property_name), content)
+ self._find_sent_delayed_event(self.user1_access_token, delay_id, True)
+ self._find_sent_delayed_event(self.user2_access_token, delay_id, False)
+
@parameterized.expand((True, False))
@unittest.override_config({"rc_message": {"per_second": 2.5, "burst_count": 3}})
def test_send_delayed_event_ratelimit(self, action_in_path: bool) -> None:
@@ -406,6 +466,9 @@ def test_restart_delayed_state_event(self, action_in_path: bool) -> None:
)
self.assertEqual(setter_expected, content.get(setter_key), content)
+ self._find_sent_delayed_event(self.user1_access_token, delay_id, True)
+ self._find_sent_delayed_event(self.user2_access_token, delay_id, False)
+
@parameterized.expand((True, False))
@unittest.override_config(
{"rc_delayed_event_mgmt": {"per_second": 0.5, "burst_count": 1}}
@@ -450,6 +513,8 @@ def test_delayed_state_is_not_cancelled_by_new_state_from_same_user(
self.user1_access_token,
)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
+ delay_id = channel.json_body.get("delay_id")
+ assert delay_id is not None
events = self._get_delayed_events()
self.assertEqual(1, len(events), events)
@@ -474,6 +539,9 @@ def test_delayed_state_is_not_cancelled_by_new_state_from_same_user(
)
self.assertEqual(setter_expected, content.get(setter_key), content)
+ self._find_sent_delayed_event(self.user1_access_token, delay_id, True)
+ self._find_sent_delayed_event(self.user2_access_token, delay_id, False)
+
def test_delayed_state_is_cancelled_by_new_state_from_other_user(
self,
) -> None:
@@ -489,6 +557,8 @@ def test_delayed_state_is_cancelled_by_new_state_from_other_user(
self.user1_access_token,
)
self.assertEqual(HTTPStatus.OK, channel.code, channel.result)
+ delay_id = channel.json_body.get("delay_id")
+ assert delay_id is not None
events = self._get_delayed_events()
self.assertEqual(1, len(events), events)
@@ -513,6 +583,9 @@ def test_delayed_state_is_cancelled_by_new_state_from_other_user(
)
self.assertEqual(setter_expected, content.get(setter_key), content)
+ self._find_sent_delayed_event(self.user1_access_token, delay_id, False)
+ self._find_sent_delayed_event(self.user2_access_token, delay_id, False)
+
def _get_delayed_events(self) -> list[JsonDict]:
channel = self.make_request(
"GET",
@@ -549,6 +622,39 @@ def _update_delayed_event(
body["action"] = action
return self.make_request("POST", path, body)
+ def _find_sent_delayed_event(
+ self, access_token: str, delay_id: str, should_find: bool
+ ) -> None:
+ """Call /sync and look for a synced event with a specified delay_id.
+ At most one event will ever have a matching delay_id.
+
+ Args:
+ access_token: The access token of the user to call /sync for.
+ delay_id: The delay_id to search for in synced events.
+ should_find: Whether /sync should include an event with a matching delay_id.
+ """
+ channel = self.make_request("GET", "/sync", access_token=access_token)
+ self.assertEqual(HTTPStatus.OK, channel.code)
+
+ rooms = channel.json_body["rooms"]
+ events = []
+ for membership in "join", "leave":
+ if membership in rooms:
+ events += rooms[membership][self.room_id]["timeline"]["events"]
+
+ found = False
+ for event in events:
+ if event["unsigned"].get("org.matrix.msc4140.delay_id") == delay_id:
+ if not should_find:
+ self.fail(
+ "Found event with matching delay_id, but expected to not find one"
+ )
+ if found:
+ self.fail("Found multiple events with matching delay_id")
+ found = True
+ if should_find and not found:
+ self.fail("Did not find event with matching delay_id")
+
def _get_path_for_delayed_state(
room_id: str, event_type: str, state_key: str, delay_ms: int
diff --git a/tests/rest/client/test_sync_sticky_events.py b/tests/rest/client/test_sync_sticky_events.py
new file mode 100644
index 0000000000..7a38debdb9
--- /dev/null
+++ b/tests/rest/client/test_sync_sticky_events.py
@@ -0,0 +1,529 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2026, Element Creations Ltd.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+import json
+import sqlite3
+from dataclasses import dataclass
+from unittest.mock import patch
+from urllib.parse import quote
+
+from twisted.internet.testing import MemoryReactor
+
+from synapse.api.constants import EventTypes, EventUnsignedContentFields, StickyEvent
+from synapse.rest import admin
+from synapse.rest.client import account_data, login, register, room, sync
+from synapse.server import HomeServer
+from synapse.types import JsonDict
+from synapse.util.clock import Clock
+from synapse.util.duration import Duration
+
+from tests import unittest
+from tests.utils import USE_POSTGRES_FOR_TESTS
+
+
+class SyncStickyEventsTestCase(unittest.HomeserverTestCase):
+ """
+ Tests for oldschool (v3) /sync with sticky events (MSC4354)
+ """
+
+ if not USE_POSTGRES_FOR_TESTS and sqlite3.sqlite_version_info < (3, 40, 0):
+ # We need the JSON functionality in SQLite
+ skip = f"SQLite version is too old to support sticky events: {sqlite3.sqlite_version_info} (See https://github.com/element-hq/synapse/issues/19428)"
+
+ servlets = [
+ room.register_servlets,
+ login.register_servlets,
+ register.register_servlets,
+ admin.register_servlets,
+ sync.register_servlets,
+ account_data.register_servlets,
+ ]
+
+ def default_config(self) -> JsonDict:
+ config = super().default_config()
+ config["experimental_features"] = {"msc4354_enabled": True}
+ return config
+
+ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+ # Register an account
+ self.user_id = self.register_user("user1", "pass")
+ self.token = self.login(self.user_id, "pass")
+
+ # Create a room
+ self.room_id = self.helper.create_room_as(self.user_id, tok=self.token)
+
+ def test_single_sticky_event_appears_in_initial_sync(self) -> None:
+ """
+ Test sending a single sticky event and then doing an initial /sync.
+ """
+
+ # Send a sticky event
+ sticky_event_response = self.helper.send_sticky_event(
+ self.room_id,
+ EventTypes.Message,
+ duration=Duration(minutes=1),
+ content={"body": "sticky message", "msgtype": "m.text"},
+ tok=self.token,
+ )
+ sticky_event_id = sticky_event_response["event_id"]
+
+ # Perform initial sync
+ channel = self.make_request(
+ "GET",
+ "/sync",
+ access_token=self.token,
+ )
+
+ self.assertEqual(channel.code, 200, channel.result)
+
+ # Get timeline events from the sync response
+ timeline_events = channel.json_body["rooms"]["join"][self.room_id]["timeline"][
+ "events"
+ ]
+
+ # Verify the sticky event is present and has the sticky TTL field
+ self.assertEqual(
+ timeline_events[-1]["event_id"],
+ sticky_event_id,
+ f"Sticky event {sticky_event_id} not found in sync timeline",
+ )
+ self.assertEqual(
+ timeline_events[-1]["unsigned"][EventUnsignedContentFields.STICKY_TTL],
+ # The other 100 ms is advanced in FakeChannel.await_result.
+ 59_900,
+ )
+
+ self.assertNotIn(
+ "sticky",
+ channel.json_body["rooms"]["join"][self.room_id],
+ "Unexpected sticky section of sync response (sticky event should be deduplicated)",
+ )
+
+ def test_sticky_event_beyond_timeline_in_initial_sync(self) -> None:
+ """
+ Test that when sending a sticky event which is subsequently
+ pushed out of the timeline window by other messages,
+ the sticky event comes down the dedicated sticky section of the sync response.
+ """
+ # Send the first sticky event
+ first_sticky_response = self.helper.send_sticky_event(
+ self.room_id,
+ EventTypes.Message,
+ duration=Duration(minutes=1),
+ content={"body": "first sticky", "msgtype": "m.text"},
+ tok=self.token,
+ )
+ first_sticky_event_id = first_sticky_response["event_id"]
+
+ # Send 10 regular timeline events,
+ # in order to push the sticky event out of the timeline window
+ # that the /sync will get.
+ regular_event_ids = []
+ for i in range(10):
+ # (Note: each one advances time by 100ms)
+ response = self.helper.send(
+ room_id=self.room_id,
+ body=f"regular message {i}",
+ tok=self.token,
+ )
+ regular_event_ids.append(response["event_id"])
+
+ # Send another sticky event
+ # (Note: this advances time by 100ms)
+ second_sticky_response = self.helper.send_sticky_event(
+ self.room_id,
+ EventTypes.Message,
+ duration=Duration(minutes=1),
+ content={"body": "second sticky", "msgtype": "m.text"},
+ tok=self.token,
+ )
+ second_sticky_event_id = second_sticky_response["event_id"]
+
+ # Perform initial sync
+ channel = self.make_request(
+ "GET",
+ "/sync",
+ access_token=self.token,
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+
+ # Get timeline events from the sync response
+ timeline_events = channel.json_body["rooms"]["join"][self.room_id]["timeline"][
+ "events"
+ ]
+ timeline_event_ids = [event["event_id"] for event in timeline_events]
+ sticky_events = channel.json_body["rooms"]["join"][self.room_id][
+ "msc4354_sticky"
+ ]["events"]
+
+ # This is canary to check the test setup is valid and that we're actually excluding the sticky event
+ # because it's outside the timeline window, not for some other potential reason.
+ self.assertNotIn(
+ regular_event_ids[0],
+ timeline_event_ids,
+ f"First regular event {regular_event_ids[0]} unexpectedly found in sync timeline (this means our test is invalid)",
+ )
+
+ # Assertions for the first sticky event: should be only in sticky section
+ self.assertNotIn(
+ first_sticky_event_id,
+ timeline_event_ids,
+ f"First sticky event {first_sticky_event_id} unexpectedly found in sync timeline (expected it to be outside the timeline window)",
+ )
+ self.assertEqual(
+ len(sticky_events),
+ 1,
+ f"Expected exactly 1 item in sticky events section, got {sticky_events}",
+ )
+ self.assertEqual(sticky_events[0]["event_id"], first_sticky_event_id)
+ self.assertEqual(
+ # The 'missing' 1100 ms were elapsed when sending events
+ sticky_events[0]["unsigned"][EventUnsignedContentFields.STICKY_TTL],
+ 58_800,
+ )
+
+ # Assertions for the second sticky event: should be only in timeline section
+ self.assertEqual(
+ timeline_events[-1]["event_id"],
+ second_sticky_event_id,
+ f"Second sticky event {second_sticky_event_id} not found in sync timeline",
+ )
+ self.assertEqual(
+ timeline_events[-1]["unsigned"][EventUnsignedContentFields.STICKY_TTL],
+ # The other 100 ms is advanced in FakeChannel.await_result.
+ 59_900,
+ )
+ # (sticky section: we already checked it only has 1 item and
+ # that item was the first above)
+
+ def test_sticky_event_filtered_from_timeline_appears_in_sticky_section(
+ self,
+ ) -> None:
+ """
+ Test that a sticky event which is excluded from the timeline by a timeline filter
+ still appears in the sticky section of the sync response.
+
+ > Interaction with RoomFilter: The RoomFilter does not apply to the sticky.events section,
+ > as it is neither timeline nor state. However, the timeline filter MUST be applied before
+ > applying the deduplication logic above. In other words, if a sticky event would normally
+ > appear in both the timeline.events section and the sticky.events section, but is filtered
+ > out by the timeline filter, the sticky event MUST appear in sticky.events.
+ > — https://github.com/matrix-org/matrix-spec-proposals/blob/4340903c15e9eab1bfb2f6a31cfa08fd535f7e7c/proposals/4354-sticky-events.md#sync-api-changes
+ """
+ # A filter that excludes message events
+ filter_json = json.dumps(
+ {
+ "room": {
+ # We only want these io.element.example events
+ "timeline": {"types": ["io.element.example"]},
+ }
+ }
+ )
+
+ # Send a sticky message event (will be filtered by our filter)
+ sticky_event_id = self.helper.send_sticky_event(
+ self.room_id,
+ EventTypes.Message,
+ duration=Duration(minutes=1),
+ content={"body": "sticky message", "msgtype": "m.text"},
+ tok=self.token,
+ )["event_id"]
+
+ # Send a non-message event (will pass the filter)
+ nonmessage_event_id = self.helper.send_event(
+ room_id=self.room_id,
+ type="io.element.example",
+ content={"membership": "join"},
+ tok=self.token,
+ )["event_id"]
+
+ # Perform initial sync with our filter
+ channel = self.make_request(
+ "GET",
+ f"/sync?filter={quote(filter_json)}",
+ access_token=self.token,
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+
+ # Get timeline and sticky events from the sync response
+ timeline_events = channel.json_body["rooms"]["join"][self.room_id]["timeline"][
+ "events"
+ ]
+ sticky_events = channel.json_body["rooms"]["join"][self.room_id][
+ "msc4354_sticky"
+ ]["events"]
+
+ # Extract event IDs from the timeline
+ timeline_event_ids = [event["event_id"] for event in timeline_events]
+
+ # The sticky message event should NOT be in the timeline (filtered out)
+ self.assertNotIn(
+ sticky_event_id,
+ timeline_event_ids,
+ f"Sticky message event {sticky_event_id} should be filtered from timeline",
+ )
+
+        # The non-message (io.element.example) event should be in the timeline
+ self.assertIn(
+ nonmessage_event_id,
+ timeline_event_ids,
+ f"Non-message event {nonmessage_event_id} should be in timeline",
+ )
+
+ # The sticky message event MUST appear in the sticky section
+ # because it was filtered from the timeline
+ received_sticky_event_ids = [e["event_id"] for e in sticky_events]
+ self.assertEqual(
+ received_sticky_event_ids,
+ [sticky_event_id],
+ )
+
+ def test_ignored_users_sticky_events(self) -> None:
+ """
+ Test that sticky events from ignored users are not delivered to clients.
+
+ > As with normal events, sticky events sent by ignored users MUST NOT be
+ > delivered to clients.
+ > — https://github.com/matrix-org/matrix-spec-proposals/blob/4340903c15e9eab1bfb2f6a31cfa08fd535f7e7c/proposals/4354-sticky-events.md#sync-api-changes
+ """
+ # Register a second user who will be ignored
+ user2_id = self.register_user("user2", "pass")
+ user2_token = self.login(user2_id, "pass")
+
+ # Join user2 to the room
+ self.helper.join(self.room_id, user2_id, tok=user2_token)
+
+ # User1 ignores user2
+ channel = self.make_request(
+ "PUT",
+ f"/_matrix/client/v3/user/{self.user_id}/account_data/m.ignored_user_list",
+ {"ignored_users": {user2_id: {}}},
+ access_token=self.token,
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+
+ # User2 sends a sticky event
+ sticky_response = self.helper.send_sticky_event(
+ self.room_id,
+ EventTypes.Message,
+ duration=Duration(minutes=1),
+ content={"body": "sticky from ignored user", "msgtype": "m.text"},
+ tok=user2_token,
+ )
+ sticky_event_id = sticky_response["event_id"]
+
+ # User1 syncs
+ channel = self.make_request(
+ "GET",
+ "/sync",
+ access_token=self.token,
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+
+ # Check timeline events - should not include the sticky event from ignored user
+ timeline_events = channel.json_body["rooms"]["join"][self.room_id]["timeline"][
+ "events"
+ ]
+ timeline_event_ids = [event["event_id"] for event in timeline_events]
+
+ self.assertNotIn(
+ sticky_event_id,
+ timeline_event_ids,
+ f"Sticky event from ignored user {sticky_event_id} should not be in timeline",
+ )
+
+ # Check sticky events section - should also not include the event
+ sticky_events = (
+ channel.json_body["rooms"]["join"][self.room_id]
+ .get("msc4354_sticky", {})
+ .get("events", [])
+ )
+
+ sticky_event_ids = [event["event_id"] for event in sticky_events]
+
+ self.assertNotIn(
+ sticky_event_id,
+ sticky_event_ids,
+ f"Sticky event from ignored user {sticky_event_id} should not be in sticky section",
+ )
+
+ def test_history_visibility_bypass_for_sticky_events(self) -> None:
+ """
+ Test that joined users can see sticky events even when history visibility
+ is set to "joined" and they joined after the event was sent.
+ This is required by MSC4354: "History visibility checks MUST NOT
+ be applied to sticky events. Any joined user is authorised to see
+ sticky events for the duration they remain sticky."
+ """
+ # Create a new room with restricted history visibility
+ room_id = self.helper.create_room_as(
+ self.user_id,
+ tok=self.token,
+ extra_content={
+ # Anyone can join
+ "preset": "public_chat",
+ # But you can't see history before you joined
+ "initial_state": [
+ {
+ "type": EventTypes.RoomHistoryVisibility,
+ "state_key": "",
+ "content": {"history_visibility": "joined"},
+ }
+ ],
+ },
+ is_public=False,
+ )
+
+ # User1 sends a sticky event
+ sticky_event_id = self.helper.send_sticky_event(
+ room_id,
+ EventTypes.Message,
+ duration=Duration(minutes=5),
+ content={"body": "sticky message", "msgtype": "m.text"},
+ tok=self.token,
+ )["event_id"]
+
+ # User1 also sends a regular event, to verify our test setup
+ regular_event_id = self.helper.send(
+ room_id=room_id,
+ body="regular message",
+ tok=self.token,
+ )["event_id"]
+
+ # Register and join a second user after the sticky event was sent
+ user2_id = self.register_user("user2", "pass")
+ user2_token = self.login(user2_id, "pass")
+ self.helper.join(room_id, user2_id, tok=user2_token)
+
+ # User2 syncs - they should see the sticky event even though
+ # history visibility is "joined" and they joined after it was sent
+ channel = self.make_request(
+ "GET",
+ "/sync",
+ access_token=user2_token,
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+
+ # Get sticky events from the sync response
+ sticky_events = channel.json_body["rooms"]["join"][room_id]["msc4354_sticky"][
+ "events"
+ ]
+ sticky_event_ids = [event["event_id"] for event in sticky_events]
+
+ # The sticky event should be visible to user2
+ self.assertEqual(
+ [sticky_event_id],
+ sticky_event_ids,
+ f"Sticky event {sticky_event_id} should be visible despite history visibility",
+ )
+
+        # Also check that the regular (non-sticky) event sent before user2
+        # joined is NOT visible. This verifies our test setup.
+ timeline_events = channel.json_body["rooms"]["join"][room_id]["timeline"][
+ "events"
+ ]
+ timeline_event_ids = [event["event_id"] for event in timeline_events]
+
+ # The regular message should NOT be visible (history visibility = joined)
+ self.assertNotIn(
+ regular_event_id,
+ timeline_event_ids,
+            f"Expecting to not see regular event ({regular_event_id}) sent before user2 joined.",
+ )
+
+ @patch.object(StickyEvent, "MAX_EVENTS_IN_SYNC", 3)
+ def test_pagination_with_many_sticky_events(self) -> None:
+ """
+ Test that pagination works correctly when there are more sticky events than
+ the intended limit.
+
+ The MSC doesn't define a limit or how to set one.
+ See thread: https://github.com/matrix-org/matrix-spec-proposals/pull/4354#discussion_r2885670008
+
+ But Synapse currently emits 100 at a time, controlled by `MAX_EVENTS_IN_SYNC`.
+ In this test we patch it to 3 (as sending 100 events is not very efficient).
+ """
+
+ # A filter that excludes message events
+ # This is needed so that the sticky events come down the sticky section
+ # and not the timeline section, which would hamper our test.
+ filter_json = json.dumps(
+ {
+ "room": {
+ # We only want these io.element.example events
+ "timeline": {"types": ["io.element.example"]},
+ }
+ }
+ )
+
+ # Send 8 sticky events: enough for 2 full pages and then a partial page with 2.
+ sent_sticky_event_ids = []
+ for i in range(8):
+ response = self.helper.send_sticky_event(
+ self.room_id,
+ EventTypes.Message,
+ duration=Duration(minutes=1),
+ content={"body": f"sticky message {i}", "msgtype": "m.text"},
+ tok=self.token,
+ )
+ sent_sticky_event_ids.append(response["event_id"])
+
+ @dataclass
+ class SyncHelperResponse:
+ sticky_event_ids: list[str]
+ """Event IDs of events returned from the sticky section, in-order."""
+
+ next_batch: str
+ """Sync token to pass to `?since=` in order to do an incremental sync."""
+
+ def _do_sync(*, since: str | None) -> SyncHelperResponse:
+ """Small helper to do a sync and get the sticky events out."""
+ and_since_param = "" if since is None else f"&since={since}"
+ channel = self.make_request(
+ "GET",
+ f"/sync?filter={quote(filter_json)}{and_since_param}",
+ access_token=self.token,
+ )
+ self.assertEqual(channel.code, 200, channel.result)
+
+ # Get sticky events from the sync response
+ sticky_events = []
+ # In this test, when no sticky events are in the response,
+ # we don't have a rooms section at all
+ if "rooms" in channel.json_body:
+ sticky_events = channel.json_body["rooms"]["join"][self.room_id][
+ "msc4354_sticky"
+ ]["events"]
+
+ return SyncHelperResponse(
+ sticky_event_ids=[
+ sticky_event["event_id"] for sticky_event in sticky_events
+ ],
+ next_batch=channel.json_body["next_batch"],
+ )
+
+ # Perform initial sync, we should get the first 3 sticky events,
+ # in order.
+ sync1 = _do_sync(since=None)
+ self.assertEqual(sync1.sticky_event_ids, sent_sticky_event_ids[0:3])
+
+ # Now do an incremental sync and expect the next page of 3
+ sync2 = _do_sync(since=sync1.next_batch)
+ self.assertEqual(sync2.sticky_event_ids, sent_sticky_event_ids[3:6])
+
+ # Now do another incremental sync and expect the last 2
+ sync3 = _do_sync(since=sync2.next_batch)
+ self.assertEqual(sync3.sticky_event_ids, sent_sticky_event_ids[6:8])
+
+ # Finally, expect an empty incremental sync at the end
+ sync4 = _do_sync(since=sync3.next_batch)
+ self.assertEqual(sync4.sticky_event_ids, [])
diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py
index 5cd4262bfc..9373303666 100644
--- a/tests/rest/client/test_third_party_rules.py
+++ b/tests/rest/client/test_third_party_rules.py
@@ -830,9 +830,9 @@ def test_on_user_deactivation_status_changed(self) -> None:
self.assertTrue(args[1])
self.assertFalse(args[2])
- # Check that the profile update callback was called twice (once for the display
- # name and once for the avatar URL), and that the "deactivation" boolean is true.
- self.assertEqual(profile_mock.call_count, 2)
+ # Check that the profile update callback was called once
+ # and that the "deactivation" boolean is true.
+ self.assertEqual(profile_mock.call_count, 1)
args = profile_mock.call_args[0]
self.assertTrue(args[3])
diff --git a/tests/rest/synapse/mas/test_users.py b/tests/rest/synapse/mas/test_users.py
index 4e8cf90700..6f44761bb8 100644
--- a/tests/rest/synapse/mas/test_users.py
+++ b/tests/rest/synapse/mas/test_users.py
@@ -10,11 +10,14 @@
#
# See the GNU Affero General Public License for more details:
# .
-
+from http import HTTPStatus
from urllib.parse import urlencode
+from parameterized import parameterized
+
from twisted.internet.testing import MemoryReactor
+from synapse.api.errors import StoreError
from synapse.appservice import ApplicationService
from synapse.server import HomeServer
from synapse.types import JsonDict, UserID, create_requester
@@ -359,6 +362,64 @@ def test_provision_user_invalid_json_types(self) -> None:
)
self.assertEqual(channel.code, 400, f"Should fail for content: {content}")
+ def test_lock_and_unlock(self) -> None:
+ store = self.hs.get_datastores().main
+
+ # Create a user in the locked state
+ alice = UserID("alice", "test")
+ channel = self.make_request(
+ "POST",
+ "/_synapse/mas/provision_user",
+ shorthand=False,
+ access_token=self.SHARED_SECRET,
+ content={
+ "localpart": alice.localpart,
+ "locked": True,
+ },
+ )
+ # This created the user, hence the 201 status code
+ self.assertEqual(channel.code, 201, channel.json_body)
+ self.assertEqual(channel.json_body, {})
+ self.assertTrue(
+ self.get_success(store.get_user_locked_status(alice.to_string()))
+ )
+
+ # Then transition from locked to unlocked
+ channel = self.make_request(
+ "POST",
+ "/_synapse/mas/provision_user",
+ shorthand=False,
+ access_token=self.SHARED_SECRET,
+ content={
+ "localpart": alice.localpart,
+ "locked": False,
+ },
+ )
+ # This updated the user, hence the 200 status code
+ self.assertEqual(channel.code, 200, channel.json_body)
+ self.assertEqual(channel.json_body, {})
+ self.assertFalse(
+ self.get_success(store.get_user_locked_status(alice.to_string()))
+ )
+
+ # And back from unlocked to locked
+ channel = self.make_request(
+ "POST",
+ "/_synapse/mas/provision_user",
+ shorthand=False,
+ access_token=self.SHARED_SECRET,
+ content={
+ "localpart": alice.localpart,
+ "locked": True,
+ },
+ )
+ # This updated the user, hence the 200 status code
+ self.assertEqual(channel.code, 200, channel.json_body)
+ self.assertEqual(channel.json_body, {})
+ self.assertTrue(
+ self.get_success(store.get_user_locked_status(alice.to_string()))
+ )
+
@skip_unless(HAS_AUTHLIB, "requires authlib")
class MasIsLocalpartAvailableResource(BaseTestCase):
@@ -621,6 +682,78 @@ def test_delete_user_erase(self) -> None:
# And erased
self.assertTrue(self.get_success(store.is_user_erased(user_id=str(alice))))
+ @parameterized.expand([(False,), (True,)])
+ def test_user_deactivation_removes_user_from_user_directory(
+ self, erase: bool
+ ) -> None:
+ """
+ Test that when `/delete_user` deactivates a user, they get removed from the user directory.
+
+ This is applicable regardless of whether the `erase` flag is set.
+
+ Regression test for:
+ 'Users are not removed from user directory upon deactivation if erase flag is true'
+ https://github.com/element-hq/synapse/issues/19540
+ """
+ alice = UserID("alice", "test")
+ store = self.hs.get_datastores().main
+
+ # Ensure we're testing what we think we are:
+ # check the user is IN the directory at the start of the test
+ self.assertEqual(
+ self.get_success(store._get_user_in_directory(alice.to_string())),
+ ("Alice", None),
+ )
+
+ channel = self.make_request(
+ "POST",
+ "/_synapse/mas/delete_user",
+ shorthand=False,
+ access_token=self.SHARED_SECRET,
+ content={"localpart": "alice", "erase": erase},
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+ self.assertEqual(channel.json_body, {})
+
+ self.assertIsNone(
+ self.get_success(store._get_user_in_directory(alice.to_string())),
+ "User still present in user directory after deactivation!",
+ )
+
+ def test_user_deactivation_removes_custom_profile_fields(self) -> None:
+ """
+ Test that when `/delete_user` deactivates a user with the `erase` flag,
+ their custom profile fields get removed.
+ """
+ alice = UserID("alice", "test")
+ store = self.hs.get_datastores().main
+
+ # Add custom profile field
+ self.get_success(store.set_profile_field(alice, "io.element.example", "hello"))
+
+ # Ensure we're testing what we think we are:
+ # check the user has profile data at the start of the test
+ self.assertEqual(
+ self.get_success(store.get_profile_fields(alice)),
+ {"io.element.example": "hello"},
+ )
+
+ channel = self.make_request(
+ "POST",
+ "/_synapse/mas/delete_user",
+ shorthand=False,
+ access_token=self.SHARED_SECRET,
+ content={"localpart": "alice", "erase": True},
+ )
+ self.assertEqual(channel.code, 200, channel.json_body)
+ self.assertEqual(channel.json_body, {})
+
+ # With no profile, we expect a 404 (Not Found) StoreError
+ with self.assertRaises(StoreError) as raised:
+ self.get_success_or_raise(store.get_profile_fields(alice))
+
+ self.assertEqual(raised.exception.code, HTTPStatus.NOT_FOUND)
+
def test_delete_user_missing_localpart(self) -> None:
channel = self.make_request(
"POST",