diff --git a/.ci/scripts/calculate_jobs.py b/.ci/scripts/calculate_jobs.py index 0cdc20e19c31..b41ec0b6e2dc 100755 --- a/.ci/scripts/calculate_jobs.py +++ b/.ci/scripts/calculate_jobs.py @@ -109,11 +109,26 @@ def set_output(key: str, value: str): "postgres": "multi-postgres", "workers": "workers", }, + { + "sytest-tag": "focal", + "postgres": "multi-postgres", + "workers": "workers", + "reactor": "asyncio", + }, ] if not IS_PR: sytest_tests.extend( [ + { + "sytest-tag": "focal", + "reactor": "asyncio", + }, + { + "sytest-tag": "focal", + "postgres": "postgres", + "reactor": "asyncio", + }, { "sytest-tag": "testing", "postgres": "postgres", diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index c3638c35eb14..839b895c82ca 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -21,4 +21,8 @@ aff1eb7c671b0a3813407321d2702ec46c71fa56 0a00b7ff14890987f09112a2ae696c61001e6cf1 # Convert tests/rest/admin/test_room.py to unix file endings (#7953). -c4268e3da64f1abb5b31deaeb5769adb6510c0a7 \ No newline at end of file +c4268e3da64f1abb5b31deaeb5769adb6510c0a7 + +# Update black to 23.1.0 (#15103) +9bb2eac71962970d02842bca441f4bcdbbf93a11 + diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 4bbe5decf88e..602f5e1759d0 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -10,6 +10,7 @@ on: permissions: contents: read + packages: write jobs: build: @@ -34,11 +35,20 @@ jobs: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Log in to GHCR + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Calculate docker image tag id: set-tag uses: docker/metadata-action@master with: - images: matrixdotorg/synapse + images: | + docker.io/matrixdotorg/synapse + ghcr.io/matrix-org/synapse flavor: | latest=false tags: | diff --git a/.github/workflows/docs-pr-netlify.yaml b/.github/workflows/docs-pr-netlify.yaml index ef7a38144e00..a5e74eb297db 100644 --- a/.github/workflows/docs-pr-netlify.yaml +++ b/.github/workflows/docs-pr-netlify.yaml @@ -14,7 +14,7 @@ jobs: # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess: - name: 📥 Download artifact - uses: dawidd6/action-download-artifact@bd10f381a96414ce2b13a11bfa89902ba7cea07f # v2.24.3 + uses: dawidd6/action-download-artifact@5e780fc7bbd0cac69fc73271ed86edf5dcb72d67 # v2.26.0 with: workflow: docs-pr.yaml run_id: ${{ github.event.workflow_run.id }} diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml index d41f6c449055..6634f2644ee8 100644 --- a/.github/workflows/docs-pr.yaml +++ b/.github/workflows/docs-pr.yaml @@ -12,7 +12,7 @@ jobs: name: GitHub Pages runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Setup mdbook uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0 @@ -39,7 +39,7 @@ jobs: name: Check links in documentation runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Setup mdbook uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0 diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index 99fc2cee08c4..6da7c22e4c33 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -27,7 
+27,7 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: stable - uses: Swatinem/rust-cache@v2 @@ -61,7 +61,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: stable - uses: Swatinem/rust-cache@v2 @@ -134,7 +134,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: stable - uses: Swatinem/rust-cache@v2 diff --git a/.github/workflows/poetry_lockfile.yaml b/.github/workflows/poetry_lockfile.yaml new file mode 100644 index 000000000000..ae4d27f2de44 --- /dev/null +++ b/.github/workflows/poetry_lockfile.yaml @@ -0,0 +1,24 @@ +on: + push: + branches: ["develop", "release-*"] + paths: + - poetry.lock + pull_request: + paths: + - poetry.lock + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + check-sdists: + name: "Check locked dependencies have sdists" + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: '3.x' + - run: pip install tomli + - run: ./scripts-dev/check_locked_deps_have_sdists.py diff --git a/.github/workflows/push_complement_image.yml b/.github/workflows/push_complement_image.yml index f26143de6bbf..b76c4cb32382 100644 --- a/.github/workflows/push_complement_image.yml +++ b/.github/workflows/push_complement_image.yml @@ -48,7 +48,7 @@ jobs: with: ref: master - name: Login to registry - uses: docker/login-action@v1 + uses: docker/login-action@v2 with: registry: ghcr.io username: ${{ github.actor }} diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index bf57bcab6104..ebd7d298a9e9 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -4,13 +4,15 @@ name: Build release artifacts on: # we build on PRs and develop to (hopefully) get early warning - # of things breaking (but only build one set of debs) + # of things breaking (but only build one set of debs). PRs skip + # building wheels on macOS & ARM. pull_request: push: branches: ["develop", "release-*"] # we do the full build on tags. tags: ["v*"] + merge_group: workflow_dispatch: concurrency: diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index e945ffe7f3ca..d2b1d75536bf 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -4,6 +4,7 @@ on: push: branches: ["develop", "release-*"] pull_request: + merge_group: workflow_dispatch: concurrency: @@ -112,7 +113,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: 1.58.1 components: clippy @@ -134,7 +135,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. 
# We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: nightly-2022-12-01 components: clippy @@ -154,9 +155,10 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: - toolchain: 1.58.1 + # We use nightly so that it correctly groups together imports + toolchain: nightly-2022-12-01 components: rustfmt - uses: Swatinem/rust-cache@v2 @@ -221,7 +223,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: 1.58.1 - uses: Swatinem/rust-cache@v2 @@ -266,7 +268,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: 1.58.1 - uses: Swatinem/rust-cache@v2 @@ -368,6 +370,7 @@ jobs: SYTEST_BRANCH: ${{ github.head_ref }} POSTGRES: ${{ matrix.job.postgres && 1}} MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') && 1}} + ASYNCIO_REACTOR: ${{ (matrix.job.reactor == 'asyncio') && 1 }} WORKERS: ${{ matrix.job.workers && 1 }} BLACKLIST: ${{ matrix.job.workers && 'synapse-blacklist-with-workers' }} TOP: ${{ github.workspace }} @@ -386,7 +389,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: 1.58.1 - uses: Swatinem/rust-cache@v2 @@ -531,7 +534,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: 1.58.1 - uses: Swatinem/rust-cache@v2 @@ -562,7 +565,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: 1.58.1 - uses: Swatinem/rust-cache@v2 @@ -585,7 +588,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. 
# We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: nightly-2022-12-01 - uses: Swatinem/rust-cache@v2 diff --git a/.github/workflows/triage-incoming.yml b/.github/workflows/triage-incoming.yml index 0f0397cf5bc6..24dac47bf247 100644 --- a/.github/workflows/triage-incoming.yml +++ b/.github/workflows/triage-incoming.yml @@ -6,7 +6,7 @@ on: jobs: triage: - uses: matrix-org/backend-meta/.github/workflows/triage-incoming.yml@v1 + uses: matrix-org/backend-meta/.github/workflows/triage-incoming.yml@v2 with: project_id: 'PVT_kwDOAIB0Bs4AFDdZ' content_id: ${{ github.event.issue.node_id }} diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index a59c8dac09b8..db514571c4af 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -18,7 +18,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: stable - uses: Swatinem/rust-cache@v2 @@ -43,7 +43,7 @@ jobs: - run: sudo apt-get -qq install xmlsec1 - name: Install Rust - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: stable - uses: Swatinem/rust-cache@v2 @@ -82,7 +82,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: stable - uses: Swatinem/rust-cache@v2 diff --git a/CHANGES.md b/CHANGES.md index a62bd4eb28a3..5f2a4a41eb42 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,303 @@ +Synapse 1.80.0 (2023-03-28) +=========================== + +No significant changes since 1.80.0rc2. + + +Synapse 1.80.0rc2 (2023-03-22) +============================== + +Bugfixes +-------- + +- Fix a bug in which the [`POST /_matrix/client/v3/rooms/{roomId}/report/{eventId}`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3roomsroomidreporteventid) endpoint would return the wrong error if the user did not have permission to view the event. This aligns Synapse's implementation with [MSC2249](https://github.com/matrix-org/matrix-spec-proposals/pull/2249). ([\#15298](https://github.com/matrix-org/synapse/issues/15298), [\#15300](https://github.com/matrix-org/synapse/issues/15300)) +- Fix a bug introduced in Synapse 1.75.0rc1 where the [SQLite port_db script](https://matrix-org.github.io/synapse/latest/postgres.html#porting-from-sqlite) + would fail to open the SQLite database. ([\#15301](https://github.com/matrix-org/synapse/issues/15301)) + + +Synapse 1.80.0rc1 (2023-03-21) +============================== + +Features +-------- + +- Stabilise support for [MSC3966](https://github.com/matrix-org/matrix-spec-proposals/pull/3966): `event_property_contains` push condition. ([\#15187](https://github.com/matrix-org/synapse/issues/15187)) +- Implement [MSC2659](https://github.com/matrix-org/matrix-spec-proposals/pull/2659): application service ping endpoint. Contributed by Tulir @ Beeper. ([\#15249](https://github.com/matrix-org/synapse/issues/15249)) +- Allow loading `/register/available` endpoint on workers. 
([\#15268](https://github.com/matrix-org/synapse/issues/15268)) +- Improve performance of creating and authenticating events. ([\#15195](https://github.com/matrix-org/synapse/issues/15195)) +- Add topic and name events to group of events that are batch persisted when creating a room. ([\#15229](https://github.com/matrix-org/synapse/issues/15229)) + + +Bugfixes +-------- + +- Fix a long-standing bug in which the user directory would assume any remote membership state events represent a profile change. ([\#14755](https://github.com/matrix-org/synapse/issues/14755), [\#14756](https://github.com/matrix-org/synapse/issues/14756)) +- Implement [MSC3873](https://github.com/matrix-org/matrix-spec-proposals/pull/3873) to fix a long-standing bug where properties with dots were handled ambiguously in push rules. ([\#15190](https://github.com/matrix-org/synapse/issues/15190)) +- Faster joins: Fix a bug introduced in Synapse 1.66 where spurious "Failed to find memberships ..." errors would be logged. ([\#15232](https://github.com/matrix-org/synapse/issues/15232)) +- Fix a long-standing error when sending message into deleted room. ([\#15235](https://github.com/matrix-org/synapse/issues/15235)) + + +Updates to the Docker image +--------------------------- + +- Ensure the Dockerfile builds on platforms that don't have a `cryptography` wheel. ([\#15239](https://github.com/matrix-org/synapse/issues/15239)) +- Mirror images to the GitHub Container Registry (`ghcr.io/matrix-org/synapse`). ([\#15281](https://github.com/matrix-org/synapse/issues/15281), [\#15282](https://github.com/matrix-org/synapse/issues/15282)) + + +Improved Documentation +---------------------- + +- Add a missing endpoint to the workers documentation. ([\#15223](https://github.com/matrix-org/synapse/issues/15223)) + + +Internal Changes +---------------- + +- Add additional functionality to declaring worker types when starting Complement in worker mode. ([\#14921](https://github.com/matrix-org/synapse/issues/14921)) +- Add `Synapse-Trace-Id` to `access-control-expose-headers` header. ([\#14974](https://github.com/matrix-org/synapse/issues/14974)) +- Make the `HttpTransactionCache` use the `Requester` in addition of the just the `Request` to build the transaction key. ([\#15200](https://github.com/matrix-org/synapse/issues/15200)) +- Improve log lines when purging rooms. ([\#15222](https://github.com/matrix-org/synapse/issues/15222)) +- Improve type hints. ([\#15230](https://github.com/matrix-org/synapse/issues/15230), [\#15231](https://github.com/matrix-org/synapse/issues/15231), [\#15238](https://github.com/matrix-org/synapse/issues/15238)) +- Move various module API callback registration methods to a dedicated class. ([\#15237](https://github.com/matrix-org/synapse/issues/15237)) +- Configure GitHub Actions for merge queues. ([\#15244](https://github.com/matrix-org/synapse/issues/15244)) +- Add schema comments about the `destinations` and `destination_rooms` tables. ([\#15247](https://github.com/matrix-org/synapse/issues/15247)) +- Skip processing of auto-join room behaviour if there are no auto-join rooms configured. ([\#15262](https://github.com/matrix-org/synapse/issues/15262)) +- Remove unused store method `_set_destination_retry_timings_emulated`. 
([\#15266](https://github.com/matrix-org/synapse/issues/15266)) +- Reorganize URL preview code. ([\#15269](https://github.com/matrix-org/synapse/issues/15269)) +- Clean-up direct TCP replication code. ([\#15272](https://github.com/matrix-org/synapse/issues/15272), [\#15274](https://github.com/matrix-org/synapse/issues/15274)) +- Make `configure_workers_and_start` script used in Complement tests compatible with older versions of Python. ([\#15275](https://github.com/matrix-org/synapse/issues/15275)) +- Add a `/versions` flag for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952). ([\#15293](https://github.com/matrix-org/synapse/issues/15293)) +- Bump hiredis from 2.2.1 to 2.2.2. ([\#15252](https://github.com/matrix-org/synapse/issues/15252)) +- Bump serde from 1.0.152 to 1.0.155. ([\#15253](https://github.com/matrix-org/synapse/issues/15253)) +- Bump pysaml2 from 7.2.1 to 7.3.1. ([\#15254](https://github.com/matrix-org/synapse/issues/15254)) +- Bump msgpack from 1.0.4 to 1.0.5. ([\#15255](https://github.com/matrix-org/synapse/issues/15255)) +- Bump gitpython from 3.1.30 to 3.1.31. ([\#15256](https://github.com/matrix-org/synapse/issues/15256)) +- Bump cryptography from 39.0.1 to 39.0.2. ([\#15257](https://github.com/matrix-org/synapse/issues/15257)) +- Bump pydantic from 1.10.4 to 1.10.6. ([\#15286](https://github.com/matrix-org/synapse/issues/15286)) +- Bump serde from 1.0.155 to 1.0.157. ([\#15287](https://github.com/matrix-org/synapse/issues/15287)) +- Bump anyhow from 1.0.69 to 1.0.70. ([\#15288](https://github.com/matrix-org/synapse/issues/15288)) +- Bump txredisapi from 1.4.7 to 1.4.9. ([\#15289](https://github.com/matrix-org/synapse/issues/15289)) +- Bump pygithub from 1.57 to 1.58.1. ([\#15290](https://github.com/matrix-org/synapse/issues/15290)) +- Bump types-requests from 2.28.11.12 to 2.28.11.15. ([\#15291](https://github.com/matrix-org/synapse/issues/15291)) + + + +Synapse 1.79.0 (2023-03-14) +=========================== + +No significant changes since 1.79.0rc2. + + +Synapse 1.79.0rc2 (2023-03-13) +============================== + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.79.0rc1 where attempting to register a `on_remove_user_third_party_identifier` module API callback would be a no-op. ([\#15227](https://github.com/matrix-org/synapse/issues/15227)) +- Fix a rare bug introduced in Synapse 1.73 where events could remain unsent to other homeservers after a faster-join to a room. ([\#15248](https://github.com/matrix-org/synapse/issues/15248)) + + +Internal Changes +---------------- + +- Refactor `filter_events_for_server`. ([\#15240](https://github.com/matrix-org/synapse/issues/15240)) + + +Synapse 1.79.0rc1 (2023-03-07) +============================== + +Features +-------- + +- Add two new Third Party Rules module API callbacks: [`on_add_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_add_user_third_party_identifier) and [`on_remove_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_remove_user_third_party_identifier). 
([\#15044](https://github.com/matrix-org/synapse/issues/15044)) +- Experimental support for [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967) to not require UIA for setting up cross-signing on first use. ([\#15077](https://github.com/matrix-org/synapse/issues/15077)) +- Add media information to the command line [user data export tool](https://matrix-org.github.io/synapse/v1.79/usage/administration/admin_faq.html#how-can-i-export-user-data). ([\#15107](https://github.com/matrix-org/synapse/issues/15107)) +- Add an [admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) to delete a [specific event report](https://spec.matrix.org/v1.6/client-server-api/#reporting-content). ([\#15116](https://github.com/matrix-org/synapse/issues/15116)) +- Add support for knocking to workers. ([\#15133](https://github.com/matrix-org/synapse/issues/15133)) +- Allow use of the `/filter` Client-Server APIs on workers. ([\#15134](https://github.com/matrix-org/synapse/issues/15134)) +- Update support for [MSC2677](https://github.com/matrix-org/matrix-spec-proposals/pull/2677): remove support for server-side aggregation of reactions. ([\#15172](https://github.com/matrix-org/synapse/issues/15172)) +- Stabilise support for [MSC3758](https://github.com/matrix-org/matrix-spec-proposals/pull/3758): `event_property_is` push condition. ([\#15185](https://github.com/matrix-org/synapse/issues/15185)) + + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.75 that caused experimental support for deleting account data to raise an internal server error while using an account data writer worker. ([\#14869](https://github.com/matrix-org/synapse/issues/14869)) +- Fix a long-standing bug where Synapse handled an unspecced field on push rules. ([\#15088](https://github.com/matrix-org/synapse/issues/15088)) +- Fix a long-standing bug where a URL preview would break if the discovered oEmbed failed to download. ([\#15092](https://github.com/matrix-org/synapse/issues/15092)) +- Fix a long-standing bug where an initial sync would not respond to changes to the list of ignored users if there was an initial sync cached. ([\#15163](https://github.com/matrix-org/synapse/issues/15163)) +- Add the `transaction_id` in the events included in many endpoints' responses. ([\#15174](https://github.com/matrix-org/synapse/issues/15174)) +- Fix a bug introduced in Synapse 1.78.0 where requests to claim dehydrated devices would fail with a `405` error. ([\#15180](https://github.com/matrix-org/synapse/issues/15180)) +- Stop applying edits when bundling aggregations, per [MSC3925](https://github.com/matrix-org/matrix-spec-proposals/pull/3925). ([\#15193](https://github.com/matrix-org/synapse/issues/15193)) +- Fix a long-standing bug where the user directory search was not case-insensitive for accented characters. ([\#15143](https://github.com/matrix-org/synapse/issues/15143)) + + +Updates to the Docker image +--------------------------- + +- Improve startup logging in the with-workers Docker image. ([\#15186](https://github.com/matrix-org/synapse/issues/15186)) + + +Improved Documentation +---------------------- + +- Document how to use caches in a module. 
([\#14026](https://github.com/matrix-org/synapse/issues/14026)) +- Clarify which worker processes the ThirdPartyRules' [`on_new_event`](https://matrix-org.github.io/synapse/v1.78/modules/third_party_rules_callbacks.html#on_new_event) module API callback runs on. ([\#15071](https://github.com/matrix-org/synapse/issues/15071)) +- Document using [Shibboleth](https://www.shibboleth.net/) as an OpenID Provider. ([\#15112](https://github.com/matrix-org/synapse/issues/15112)) +- Correct reference to `federation_verify_certificates` in configuration documentation. ([\#15139](https://github.com/matrix-org/synapse/issues/15139)) +- Correct small documentation errors in some `MatrixFederationHttpClient` methods. ([\#15148](https://github.com/matrix-org/synapse/issues/15148)) +- Correct the description of the behavior of `registration_shared_secret_path` on startup. ([\#15168](https://github.com/matrix-org/synapse/issues/15168)) + + +Deprecations and Removals +------------------------- + +- Deprecate the `on_threepid_bind` module callback, to be replaced by [`on_add_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_add_user_third_party_identifier). See [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.79/docs/upgrade.md#upgrading-to-v1790). ([\#15044](https://github.com/matrix-org/synapse/issues/15044)) +- Remove the unspecced `room_alias` field from the [`/createRoom`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3createroom) response. ([\#15093](https://github.com/matrix-org/synapse/issues/15093)) +- Remove the unspecced `PUT` on the `/knock/{roomIdOrAlias}` endpoint. ([\#15189](https://github.com/matrix-org/synapse/issues/15189)) +- Remove the undocumented and unspecced `type` parameter to the `/thumbnail` endpoint. ([\#15137](https://github.com/matrix-org/synapse/issues/15137)) +- Remove unspecced and buggy `PUT` method on the unstable `/rooms//batch_send` endpoint. ([\#15199](https://github.com/matrix-org/synapse/issues/15199)) + + +Internal Changes +---------------- + +- Run the integration test suites with the asyncio reactor enabled in CI. ([\#14101](https://github.com/matrix-org/synapse/issues/14101)) +- Batch up storing state groups when creating a new room. ([\#14918](https://github.com/matrix-org/synapse/issues/14918)) +- Update [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952) support based on changes to the MSC. ([\#15051](https://github.com/matrix-org/synapse/issues/15051)) +- Refactor writing json data in `FileExfiltrationWriter`. ([\#15095](https://github.com/matrix-org/synapse/issues/15095)) +- Tighten the login ratelimit defaults. ([\#15135](https://github.com/matrix-org/synapse/issues/15135)) +- Fix a typo in an experimental config setting. ([\#15138](https://github.com/matrix-org/synapse/issues/15138)) +- Refactor the media modules. ([\#15146](https://github.com/matrix-org/synapse/issues/15146), [\#15175](https://github.com/matrix-org/synapse/issues/15175)) +- Improve type hints. ([\#15164](https://github.com/matrix-org/synapse/issues/15164)) +- Move `get_event_report` and `get_event_reports_paginate` from `RoomStore` to `RoomWorkerStore`. 
([\#15165](https://github.com/matrix-org/synapse/issues/15165)) +- Remove dangling reference to being a reference implementation in docstring. ([\#15167](https://github.com/matrix-org/synapse/issues/15167)) +- Add an option to force a rebuild of the "editable" complement image. ([\#15184](https://github.com/matrix-org/synapse/issues/15184)) +- Use nightly rustfmt in CI. ([\#15188](https://github.com/matrix-org/synapse/issues/15188)) +- Add a `get_next_txn` method to `StreamIdGenerator` to match `MultiWriterIdGenerator`. ([\#15191](https://github.com/matrix-org/synapse/issues/15191)) +- Combine `AbstractStreamIdTracker` and `AbstractStreamIdGenerator`. ([\#15192](https://github.com/matrix-org/synapse/issues/15192)) +- Automatically fix errors with `ruff`. ([\#15194](https://github.com/matrix-org/synapse/issues/15194)) +- Refactor database transaction for query users' devices to reduce database pool contention. ([\#15215](https://github.com/matrix-org/synapse/issues/15215)) +- Correct `test_icu_word_boundary_punctuation` so that it passes with the ICU versions available in Alpine and macOS. ([\#15177](https://github.com/matrix-org/synapse/issues/15177)) + +
Locked dependency updates + + - Bump actions/checkout from 2 to 3. ([\#15155](https://github.com/matrix-org/synapse/issues/15155)) + - Bump black from 22.12.0 to 23.1.0. ([\#15103](https://github.com/matrix-org/synapse/issues/15103)) + - Bump dawidd6/action-download-artifact from 2.25.0 to 2.26.0. ([\#15152](https://github.com/matrix-org/synapse/issues/15152)) + - Bump docker/login-action from 1 to 2. ([\#15154](https://github.com/matrix-org/synapse/issues/15154)) + - Bump matrix-org/backend-meta from 1 to 2. ([\#15156](https://github.com/matrix-org/synapse/issues/15156)) + - Bump ruff from 0.0.237 to 0.0.252. ([\#15159](https://github.com/matrix-org/synapse/issues/15159)) + - Bump serde_json from 1.0.93 to 1.0.94. ([\#15214](https://github.com/matrix-org/synapse/issues/15214)) + - Bump types-commonmark from 0.9.2.1 to 0.9.2.2. ([\#15209](https://github.com/matrix-org/synapse/issues/15209)) + - Bump types-opentracing from 2.4.10.1 to 2.4.10.3. ([\#15158](https://github.com/matrix-org/synapse/issues/15158)) + - Bump types-pillow from 9.4.0.13 to 9.4.0.17. ([\#15211](https://github.com/matrix-org/synapse/issues/15211)) + - Bump types-psycopg2 from 2.9.21.4 to 2.9.21.8. ([\#15210](https://github.com/matrix-org/synapse/issues/15210)) + - Bump types-pyopenssl from 22.1.0.2 to 23.0.0.4. ([\#15213](https://github.com/matrix-org/synapse/issues/15213)) + - Bump types-setuptools from 67.3.0.1 to 67.4.0.3. ([\#15160](https://github.com/matrix-org/synapse/issues/15160)) + - Bump types-setuptools from 67.4.0.3 to 67.5.0.0. ([\#15212](https://github.com/matrix-org/synapse/issues/15212)) + - Bump typing-extensions from 4.4.0 to 4.5.0. ([\#15157](https://github.com/matrix-org/synapse/issues/15157)) +
+ + +Synapse 1.78.0 (2023-02-28) +=========================== + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.76 where 5s delays would occasionally occur in deployments using workers. ([\#15150](https://github.com/matrix-org/synapse/issues/15150)) + + +Synapse 1.78.0rc1 (2023-02-21) +============================== + +Features +-------- + +- Implement the experimental `exact_event_match` push rule condition from [MSC3758](https://github.com/matrix-org/matrix-spec-proposals/pull/3758). ([\#14964](https://github.com/matrix-org/synapse/issues/14964)) +- Add account data to the command line [user data export tool](https://matrix-org.github.io/synapse/v1.78/usage/administration/admin_faq.html#how-can-i-export-user-data). ([\#14969](https://github.com/matrix-org/synapse/issues/14969)) +- Implement [MSC3873](https://github.com/matrix-org/matrix-spec-proposals/pull/3873) to disambiguate push rule keys with dots in them. ([\#15004](https://github.com/matrix-org/synapse/issues/15004)) +- Allow Synapse to use a specific Redis [logical database](https://redis.io/commands/select/) in worker-mode deployments. ([\#15034](https://github.com/matrix-org/synapse/issues/15034)) +- Tag opentracing spans for federation requests with the name of the worker serving the request. ([\#15042](https://github.com/matrix-org/synapse/issues/15042)) +- Implement the experimental `exact_event_property_contains` push rule condition from [MSC3966](https://github.com/matrix-org/matrix-spec-proposals/pull/3966). ([\#15045](https://github.com/matrix-org/synapse/issues/15045)) +- Remove spurious `dont_notify` action from the defaults for the `.m.rule.reaction` pushrule. ([\#15073](https://github.com/matrix-org/synapse/issues/15073)) +- Update the error code returned when user sends a duplicate annotation. ([\#15075](https://github.com/matrix-org/synapse/issues/15075)) + + +Bugfixes +-------- + +- Prevent clients from reporting nonexistent events. ([\#13779](https://github.com/matrix-org/synapse/issues/13779)) +- Return spec-compliant JSON errors when unknown endpoints are requested. ([\#14605](https://github.com/matrix-org/synapse/issues/14605)) +- Fix a long-standing bug where the room aliases returned could be corrupted. ([\#15038](https://github.com/matrix-org/synapse/issues/15038)) +- Fix a bug introduced in Synapse 1.76.0 where partially-joined rooms could not be deleted using the [purge room API](https://matrix-org.github.io/synapse/latest/admin_api/rooms.html#delete-room-api). ([\#15068](https://github.com/matrix-org/synapse/issues/15068)) +- Fix a long-standing bug where federated joins would fail if the first server in the list of servers to try is not in the room. ([\#15074](https://github.com/matrix-org/synapse/issues/15074)) +- Fix a bug introduced in Synapse v1.74.0 where searching with colons when using ICU for search term tokenisation would fail with an error. ([\#15079](https://github.com/matrix-org/synapse/issues/15079)) +- Reduce the likelihood of a rare race condition where rejoining a restricted room over federation would fail. ([\#15080](https://github.com/matrix-org/synapse/issues/15080)) +- Fix a bug introduced in Synapse 1.76 where workers would fail to start if the `health` listener was configured. 
([\#15096](https://github.com/matrix-org/synapse/issues/15096)) +- Fix a bug introduced in Synapse 1.75 where the [portdb script](https://matrix-org.github.io/synapse/release-v1.78/postgres.html#porting-from-sqlite) would fail to run after a room had been faster-joined. ([\#15108](https://github.com/matrix-org/synapse/issues/15108)) + + +Improved Documentation +---------------------- + +- Document how to start Synapse with Poetry. Contributed by @thezaidbintariq. ([\#14892](https://github.com/matrix-org/synapse/issues/14892), [\#15022](https://github.com/matrix-org/synapse/issues/15022)) +- Update delegation documentation to clarify that SRV DNS delegation does not eliminate all needs to serve files from .well-known locations. Contributed by @williamkray. ([\#14959](https://github.com/matrix-org/synapse/issues/14959)) +- Fix a mistake in registration_shared_secret_path docs. ([\#15078](https://github.com/matrix-org/synapse/issues/15078)) +- Refer to a more recent blog post on the [Database Maintenance Tools](https://matrix-org.github.io/synapse/latest/usage/administration/database_maintenance_tools.html) page. Contributed by @jahway603. ([\#15083](https://github.com/matrix-org/synapse/issues/15083)) + + +Internal Changes +---------------- + +- Re-type hint some collections as read-only. ([\#13755](https://github.com/matrix-org/synapse/issues/13755)) +- Faster joins: don't stall when another user joins during a partial-state room resync. ([\#14606](https://github.com/matrix-org/synapse/issues/14606)) +- Add a class `UnpersistedEventContext` to allow for the batching up of storing state groups. ([\#14675](https://github.com/matrix-org/synapse/issues/14675)) +- Add a check to ensure that locked dependencies have source distributions available. ([\#14742](https://github.com/matrix-org/synapse/issues/14742)) +- Tweak comment on `_is_local_room_accessible` as part of room visibility in `/hierarchy` to clarify the condition for a room being visible. ([\#14834](https://github.com/matrix-org/synapse/issues/14834)) +- Prevent `WARNING: there is already a transaction in progress` lines appearing in PostgreSQL's logs on some occasions. ([\#14840](https://github.com/matrix-org/synapse/issues/14840)) +- Use `StrCollection` to avoid potential bugs with `Collection[str]`. ([\#14929](https://github.com/matrix-org/synapse/issues/14929)) +- Improve performance of `/sync` in a few situations. ([\#14973](https://github.com/matrix-org/synapse/issues/14973)) +- Limit concurrent event creation for a room to avoid state resolution when sending bursts of events to a local room. ([\#14977](https://github.com/matrix-org/synapse/issues/14977)) +- Skip calculating unread push actions in /sync when enable_push is false. ([\#14980](https://github.com/matrix-org/synapse/issues/14980)) +- Add a schema dump symlinks inside `contrib`, to make it easier for IDEs to interrogate Synapse's database schema. ([\#14982](https://github.com/matrix-org/synapse/issues/14982)) +- Improve type hints. 
([\#15008](https://github.com/matrix-org/synapse/issues/15008), [\#15026](https://github.com/matrix-org/synapse/issues/15026), [\#15027](https://github.com/matrix-org/synapse/issues/15027), [\#15028](https://github.com/matrix-org/synapse/issues/15028), [\#15031](https://github.com/matrix-org/synapse/issues/15031), [\#15035](https://github.com/matrix-org/synapse/issues/15035), [\#15052](https://github.com/matrix-org/synapse/issues/15052), [\#15072](https://github.com/matrix-org/synapse/issues/15072), [\#15084](https://github.com/matrix-org/synapse/issues/15084)) +- Update [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952) support based on changes to the MSC. ([\#15037](https://github.com/matrix-org/synapse/issues/15037)) +- Avoid mutating a cached value in `get_user_devices_from_cache`. ([\#15040](https://github.com/matrix-org/synapse/issues/15040)) +- Fix a rare exception in logs on start up. ([\#15041](https://github.com/matrix-org/synapse/issues/15041)) +- Update pyo3-log to v0.8.1. ([\#15043](https://github.com/matrix-org/synapse/issues/15043)) +- Avoid mutating cached values in `_generate_sync_entry_for_account_data`. ([\#15047](https://github.com/matrix-org/synapse/issues/15047)) +- Refactor arguments of `try_unbind_threepid` and `_try_unbind_threepid_with_id_server` to not use dictionaries. ([\#15053](https://github.com/matrix-org/synapse/issues/15053)) +- Merge debug logging from the hotfixes branch. ([\#15054](https://github.com/matrix-org/synapse/issues/15054)) +- Faster joins: omit device list updates originating from partial state rooms in /sync responses without lazy loading of members enabled. ([\#15069](https://github.com/matrix-org/synapse/issues/15069)) +- Fix clashing database transaction name. ([\#15070](https://github.com/matrix-org/synapse/issues/15070)) +- Upper-bound frozendict dependency. This works around us being unable to test installing our wheels against Python 3.11 in CI. ([\#15114](https://github.com/matrix-org/synapse/issues/15114)) +- Tweak logging for when a worker waits for its view of a replication stream to catch up. ([\#15120](https://github.com/matrix-org/synapse/issues/15120)) + +
Locked dependency updates + +- Bump bleach from 5.0.1 to 6.0.0. ([\#15059](https://github.com/matrix-org/synapse/issues/15059)) +- Bump cryptography from 38.0.4 to 39.0.1. ([\#15020](https://github.com/matrix-org/synapse/issues/15020)) +- Bump ruff version from 0.0.230 to 0.0.237. ([\#15033](https://github.com/matrix-org/synapse/issues/15033)) +- Bump dtolnay/rust-toolchain from 9cd00a88a73addc8617065438eff914dd08d0955 to 25dc93b901a87e864900a8aec6c12e9aa794c0c3. ([\#15060](https://github.com/matrix-org/synapse/issues/15060)) +- Bump systemd-python from 234 to 235. ([\#15061](https://github.com/matrix-org/synapse/issues/15061)) +- Bump serde_json from 1.0.92 to 1.0.93. ([\#15062](https://github.com/matrix-org/synapse/issues/15062)) +- Bump types-requests from 2.28.11.8 to 2.28.11.12. ([\#15063](https://github.com/matrix-org/synapse/issues/15063)) +- Bump types-pillow from 9.4.0.5 to 9.4.0.10. ([\#15064](https://github.com/matrix-org/synapse/issues/15064)) +- Bump sentry-sdk from 1.13.0 to 1.15.0. ([\#15065](https://github.com/matrix-org/synapse/issues/15065)) +- Bump types-jsonschema from 4.17.0.3 to 4.17.0.5. ([\#15099](https://github.com/matrix-org/synapse/issues/15099)) +- Bump types-bleach from 5.0.3.1 to 6.0.0.0. ([\#15100](https://github.com/matrix-org/synapse/issues/15100)) +- Bump dtolnay/rust-toolchain from 25dc93b901a87e864900a8aec6c12e9aa794c0c3 to e12eda571dc9a5ee5d58eecf4738ec291c66f295. ([\#15101](https://github.com/matrix-org/synapse/issues/15101)) +- Bump dawidd6/action-download-artifact from 2.24.3 to 2.25.0. ([\#15102](https://github.com/matrix-org/synapse/issues/15102)) +- Bump types-pillow from 9.4.0.10 to 9.4.0.13. ([\#15104](https://github.com/matrix-org/synapse/issues/15104)) +- Bump types-setuptools from 67.1.0.0 to 67.3.0.1. ([\#15105](https://github.com/matrix-org/synapse/issues/15105)) + + +
+ + Synapse 1.77.0 (2023-02-14) =========================== @@ -63,7 +363,7 @@ Internal Changes - Preparatory work for adding a denormalised event stream ordering column in the future. Contributed by Nick @ Beeper (@fizzadar). ([\#14979](https://github.com/matrix-org/synapse/issues/14979), [9cd7610](https://github.com/matrix-org/synapse/commit/9cd7610f86ab5051c9365dd38d1eec405a5f8ca6), [f10caa7](https://github.com/matrix-org/synapse/commit/f10caa73eee0caa91cf373966104d1ededae2aee); see [\#15014](https://github.com/matrix-org/synapse/issues/15014)) - Add tests for `_flatten_dict`. ([\#14981](https://github.com/matrix-org/synapse/issues/14981), [\#15002](https://github.com/matrix-org/synapse/issues/15002)) -
Dependabot updates +
Locked dependency updates - Bump dtolnay/rust-toolchain from e645b0cf01249a964ec099494d38d2da0f0b349f to 9cd00a88a73addc8617065438eff914dd08d0955. ([\#14968](https://github.com/matrix-org/synapse/issues/14968)) - Bump docker/build-push-action from 3 to 4. ([\#14952](https://github.com/matrix-org/synapse/issues/14952)) diff --git a/Cargo.lock b/Cargo.lock index a9219eac11e3..2bad27154391 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13,9 +13,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.69" +version = "1.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" +checksum = "7de8ce5e0f9f8d88245311066a578d72b7af3e7088f32783804676302df237e4" [[package]] name = "arc-swap" @@ -185,9 +185,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.46" +version = "1.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b" +checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224" dependencies = [ "unicode-ident", ] @@ -232,9 +232,9 @@ dependencies = [ [[package]] name = "pyo3-log" -version = "0.7.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5695ccff5060c13ca1751cf8c857a12da9b0bf0378cb071c5e0326f7c7e4c1b" +checksum = "f9c8b57fe71fb5dcf38970ebedc2b1531cf1c14b1b9b4c560a182a57e115575c" dependencies = [ "arc-swap", "log", @@ -250,7 +250,7 @@ dependencies = [ "proc-macro2", "pyo3-macros-backend", "quote", - "syn", + "syn 1.0.104", ] [[package]] @@ -261,7 +261,7 @@ checksum = "c8df9be978a2d2f0cdebabb03206ed73b11314701a5bfe71b0d753b81997777f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.104", ] [[package]] @@ -276,9 +276,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.21" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" +checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" dependencies = [ "proc-macro2", ] @@ -323,29 +323,29 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "serde" -version = "1.0.152" +version = "1.0.157" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" +checksum = "707de5fcf5df2b5788fca98dd7eab490bc2fd9b7ef1404defc462833b83f25ca" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.152" +version = "1.0.157" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" +checksum = "78997f4555c22a7971214540c4a661291970619afd56de19f77e0de86296e1e5" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.2", ] [[package]] name = "serde_json" -version = "1.0.92" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7434af0dc1cbd59268aa98b4c22c131c0584d2232f6fb166efb993e2832e896a" +checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea" dependencies = [ "itoa", "ryu", @@ -375,6 +375,17 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn" +version = "2.0.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "59d3276aee1fa0c33612917969b5172b5be2db051232a6e4826f1a1a9191b045" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + [[package]] name = "synapse" version = "0.1.0" diff --git a/contrib/datagrip/README.md b/contrib/datagrip/README.md new file mode 100644 index 000000000000..bbe4f3a5a323 --- /dev/null +++ b/contrib/datagrip/README.md @@ -0,0 +1,28 @@ +# Schema symlinks + +This directory contains symlinks to the latest dump of the postgres full schema. This is useful to have, as it allows IDEs to understand our schema and provide autocomplete, linters, inspections, etc. + +In particular, the DataGrip functionality in IntelliJ's products seems to only consider files called `*.sql` when defining a schema from DDL; `*.sql.postgres` will be ignored. To get around this we symlink those files to ones ending in `.sql`. We've chosen to ignore the `.sql.sqlite` schema dumps here, as they're not intended for production use (and are much quicker to test against). + +## Example +![](datagrip-aware-of-schema.png) + +## Caveats + +- Doesn't include temporary tables created ad-hoc by Synapse. +- Postgres only. IDEs will likely be confused by SQLite-specific queries. +- Will not include migrations created after the latest schema dump. +- Symlinks might confuse checkouts on Windows systems. + +## Instructions + +### Jetbrains IDEs with DataGrip plugin + +- View -> Tool Windows -> Database +- `+` Icon -> DDL Data Source +- Pick a name, e.g. `Synapse schema dump` +- Under sources, click `+`. +- Add an entry with Path pointing to this directory, and dialect set to PostgreSQL. +- OK, and OK. +- IDE should now be aware of the schema. +- Try control-clicking on a table name in a bit of SQL e.g. in `_get_forgotten_rooms_for_user_txn`. 
\ No newline at end of file diff --git a/contrib/datagrip/common.sql b/contrib/datagrip/common.sql new file mode 120000 index 000000000000..28c5aa8a1b8b --- /dev/null +++ b/contrib/datagrip/common.sql @@ -0,0 +1 @@ +../../synapse/storage/schema/common/full_schemas/72/full.sql.postgres \ No newline at end of file diff --git a/contrib/datagrip/datagrip-aware-of-schema.png b/contrib/datagrip/datagrip-aware-of-schema.png new file mode 100644 index 000000000000..653642da9130 Binary files /dev/null and b/contrib/datagrip/datagrip-aware-of-schema.png differ diff --git a/contrib/datagrip/main.sql b/contrib/datagrip/main.sql new file mode 120000 index 000000000000..eec0a2fb6dc3 --- /dev/null +++ b/contrib/datagrip/main.sql @@ -0,0 +1 @@ +../../synapse/storage/schema/main/full_schemas/72/full.sql.postgres \ No newline at end of file diff --git a/contrib/datagrip/schema_version.sql b/contrib/datagrip/schema_version.sql new file mode 120000 index 000000000000..e1b0985d7473 --- /dev/null +++ b/contrib/datagrip/schema_version.sql @@ -0,0 +1 @@ +../../synapse/storage/schema/common/schema_version.sql \ No newline at end of file diff --git a/contrib/datagrip/state.sql b/contrib/datagrip/state.sql new file mode 120000 index 000000000000..4de4fbbdf7f0 --- /dev/null +++ b/contrib/datagrip/state.sql @@ -0,0 +1 @@ +../../synapse/storage/schema/state/full_schemas/72/full.sql.postgres \ No newline at end of file diff --git a/contrib/docker_compose_workers/README.md b/contrib/docker_compose_workers/README.md index bdd3dd32e07f..d3cdfe5614bb 100644 --- a/contrib/docker_compose_workers/README.md +++ b/contrib/docker_compose_workers/README.md @@ -68,6 +68,7 @@ redis: enabled: true host: redis port: 6379 + # dbid: # password: ``` diff --git a/debian/changelog b/debian/changelog index ea651438f1dc..98366d49166b 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,52 @@ +matrix-synapse-py3 (1.80.0) stable; urgency=medium + + * New Synapse release 1.80.0. + + -- Synapse Packaging team Tue, 28 Mar 2023 11:10:33 +0100 + +matrix-synapse-py3 (1.80.0~rc2) stable; urgency=medium + + * New Synapse release 1.80.0rc2. + + -- Synapse Packaging team Wed, 22 Mar 2023 08:30:16 -0700 + +matrix-synapse-py3 (1.80.0~rc1) stable; urgency=medium + + * New Synapse release 1.80.0rc1. + + -- Synapse Packaging team Tue, 21 Mar 2023 10:56:08 -0700 + +matrix-synapse-py3 (1.79.0) stable; urgency=medium + + * New Synapse release 1.79.0. + + -- Synapse Packaging team Tue, 14 Mar 2023 16:14:50 +0100 + +matrix-synapse-py3 (1.79.0~rc2) stable; urgency=medium + + * New Synapse release 1.79.0rc2. + + -- Synapse Packaging team Mon, 13 Mar 2023 12:54:21 +0000 + +matrix-synapse-py3 (1.79.0~rc1) stable; urgency=medium + + * New Synapse release 1.79.0rc1. + + -- Synapse Packaging team Tue, 07 Mar 2023 12:03:49 +0000 + +matrix-synapse-py3 (1.78.0) stable; urgency=medium + + * New Synapse release 1.78.0. + + -- Synapse Packaging team Tue, 28 Feb 2023 08:56:03 -0800 + +matrix-synapse-py3 (1.78.0~rc1) stable; urgency=medium + + * Add `matrix-org-archive-keyring` package as recommended. + * New Synapse release 1.78.0rc1. + + -- Synapse Packaging team Tue, 21 Feb 2023 14:29:19 +0000 + matrix-synapse-py3 (1.77.0) stable; urgency=medium * New Synapse release 1.77.0. diff --git a/debian/control b/debian/control index bc628cec085e..2ff55db5de62 100644 --- a/debian/control +++ b/debian/control @@ -37,6 +37,7 @@ Depends: # so we put perl:Depends in Suggests rather than Depends. 
Recommends: ${shlibs1:Recommends}, + matrix-org-archive-keyring, Suggests: sqlite3, ${perl:Depends}, diff --git a/docker/Dockerfile b/docker/Dockerfile index bfa0cb752324..8682b2f7dc90 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -38,10 +38,12 @@ RUN \ --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ apt-get update -qq && apt-get install -yqq \ - build-essential git libffi-dev libssl-dev \ + build-essential curl git libffi-dev libssl-dev \ && rm -rf /var/lib/apt/lists/* -# Install rust and ensure its in the PATH +# Install rust and ensure its in the PATH. +# (Rust may be needed to compile `cryptography`---which is one of poetry's +# dependencies---on platforms that don't have a `cryptography` wheel. ENV RUSTUP_HOME=/rust ENV CARGO_HOME=/cargo ENV PATH=/cargo/bin:/rust/bin:$PATH @@ -49,6 +51,11 @@ RUN mkdir /rust /cargo RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal +# arm64 builds consume a lot of memory if `CARGO_NET_GIT_FETCH_WITH_CLI` is not +# set to true, so we expose it as a build-arg. +ARG CARGO_NET_GIT_FETCH_WITH_CLI=false +ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_NET_GIT_FETCH_WITH_CLI + # We install poetry in its own build stage to avoid its dependencies conflicting with # synapse's dependencies. RUN --mount=type=cache,target=/root/.cache/pip \ @@ -193,12 +200,7 @@ RUN \ openssl \ && rm -rf /var/lib/apt/lists/* -COPY --from=builder /install/bin /usr/local/bin -# Copy the python site-packages into /install instead of /usr/local/lib as some environments use different paths -# e.g pyston using /usr/lib, so we instead specify a PYTHONPATH environment variable to make everyone happy. -COPY --from=builder /install/lib/*/site-packages /install -ENV PYTHONPATH="/install" - +COPY --from=builder /install /usr/local COPY ./docker/start.py /start.py COPY ./docker/conf /conf diff --git a/docker/complement/conf/start_for_complement.sh b/docker/complement/conf/start_for_complement.sh index af13209c54e9..5560ab8b95a8 100755 --- a/docker/complement/conf/start_for_complement.sh +++ b/docker/complement/conf/start_for_complement.sh @@ -51,8 +51,7 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then # -z True if the length of string is zero. if [[ -z "$SYNAPSE_WORKER_TYPES" ]]; then export SYNAPSE_WORKER_TYPES="\ - event_persister, \ - event_persister, \ + event_persister:2, \ background_worker, \ frontend_proxy, \ event_creator, \ @@ -64,7 +63,8 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then synchrotron, \ client_reader, \ appservice, \ - pusher" + pusher, \ + stream_writers=account_data+presence+receipts+to_device+typing" fi log "Workers requested: $SYNAPSE_WORKER_TYPES" diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index 58c62f2231f3..3f2f5c2daf85 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -19,8 +19,15 @@ # The environment variables it reads are: # * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver. # * SYNAPSE_REPORT_STATS: Whether to report stats. -# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG -# below. Leave empty for no workers. +# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKERS_CONFIG +# below. Leave empty for no workers. Add a ':' and a number at the end to +# multiply that worker. 
Append multiple worker types with '+' to merge the +# worker types into a single worker. Add a name and a '=' to the front of a +# worker type to give this instance a name in logs and nginx. +# Examples: +# SYNAPSE_WORKER_TYPES='event_persister, federation_sender, client_reader' +# SYNAPSE_WORKER_TYPES='event_persister:2, federation_sender:2, client_reader' +# SYNAPSE_WORKER_TYPES='stream_writers=account_data+presence+typing' # * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files # will be treated as Application Service registration files. # * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format. @@ -40,16 +47,33 @@ import os import platform +import re import subprocess import sys +from collections import defaultdict +from itertools import chain from pathlib import Path -from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Optional, Set +from typing import ( + Any, + Dict, + List, + Mapping, + MutableMapping, + NoReturn, + Optional, + Set, + SupportsIndex, +) import yaml from jinja2 import Environment, FileSystemLoader MAIN_PROCESS_HTTP_LISTENER_PORT = 8080 +# A simple name used as a placeholder in the WORKERS_CONFIG below. This will be replaced +# during processing with the name of the worker. +WORKER_PLACEHOLDER_NAME = "placeholder_name" + # Workers with exposed endpoints needs either "client", "federation", or "media" listener_resources # Watching /_matrix/client needs a "client" listener # Watching /_matrix/federation needs a "federation" listener @@ -70,11 +94,13 @@ "endpoint_patterns": [ "^/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$" ], - "shared_extra_conf": {"update_user_directory_from_worker": "user_dir1"}, + "shared_extra_conf": { + "update_user_directory_from_worker": WORKER_PLACEHOLDER_NAME + }, "worker_extra_conf": "", }, "media_repository": { - "app": "synapse.app.media_repository", + "app": "synapse.app.generic_worker", "listener_resources": ["media"], "endpoint_patterns": [ "^/_matrix/media/", @@ -87,7 +113,7 @@ # The first configured media worker will run the media background jobs "shared_extra_conf": { "enable_media_repo": False, - "media_instance_running_background_jobs": "media_repository1", + "media_instance_running_background_jobs": WORKER_PLACEHOLDER_NAME, }, "worker_extra_conf": "enable_media_repo: true", }, @@ -95,7 +121,9 @@ "app": "synapse.app.generic_worker", "listener_resources": [], "endpoint_patterns": [], - "shared_extra_conf": {"notify_appservices_from_worker": "appservice1"}, + "shared_extra_conf": { + "notify_appservices_from_worker": WORKER_PLACEHOLDER_NAME + }, "worker_extra_conf": "", }, "federation_sender": { @@ -135,6 +163,7 @@ "^/_matrix/client/versions$", "^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$", "^/_matrix/client/(r0|v3|unstable)/register$", + "^/_matrix/client/(r0|v3|unstable)/register/available$", "^/_matrix/client/(r0|v3|unstable)/auth/.*/fallback/web$", "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/messages$", "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event", @@ -142,6 +171,7 @@ "^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases", "^/_matrix/client/v1/rooms/.*/timestamp_to_event$", "^/_matrix/client/(api/v1|r0|v3|unstable)/search", + "^/_matrix/client/(r0|v3|unstable)/user/.*/filter(/|$)", ], "shared_extra_conf": {}, "worker_extra_conf": "", @@ -191,9 +221,9 @@ "app": "synapse.app.generic_worker", "listener_resources": [], "endpoint_patterns": [], - # This worker cannot be sharded. 
Therefore there should only ever be one background - # worker, and it should be named background_worker1 - "shared_extra_conf": {"run_background_tasks_on": "background_worker1"}, + # This worker cannot be sharded. Therefore, there should only ever be one + # background worker. This is enforced for the safety of your database. + "shared_extra_conf": {"run_background_tasks_on": WORKER_PLACEHOLDER_NAME}, "worker_extra_conf": "", }, "event_creator": { @@ -204,6 +234,7 @@ "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/send", "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$", "^/_matrix/client/(api/v1|r0|v3|unstable)/join/", + "^/_matrix/client/(api/v1|r0|v3|unstable)/knock/", "^/_matrix/client/(api/v1|r0|v3|unstable)/profile/", "^/_matrix/client/(v1|unstable/org.matrix.msc2716)/rooms/.*/batch_send", ], @@ -273,7 +304,7 @@ """ NGINX_UPSTREAM_CONFIG_BLOCK = """ -upstream {upstream_worker_type} {{ +upstream {upstream_worker_base_name} {{ {body} }} """ @@ -324,7 +355,7 @@ def convert(src: str, dst: str, **template_vars: object) -> None: def add_worker_roles_to_shared_config( shared_config: dict, - worker_type: str, + worker_types_set: Set[str], worker_name: str, worker_port: int, ) -> None: @@ -332,22 +363,36 @@ def add_worker_roles_to_shared_config( append appropriate worker information to it for the current worker_type instance. Args: - shared_config: The config dict that all worker instances share (after being converted to YAML) - worker_type: The type of worker (one of those defined in WORKERS_CONFIG). + shared_config: The config dict that all worker instances share (after being + converted to YAML) + worker_types_set: The type of worker (one of those defined in WORKERS_CONFIG). + This list can be a single worker type or multiple. worker_name: The name of the worker instance. worker_port: The HTTP replication port that the worker instance is listening on. """ - # The instance_map config field marks the workers that write to various replication streams + # The instance_map config field marks the workers that write to various replication + # streams instance_map = shared_config.setdefault("instance_map", {}) - # Worker-type specific sharding config - if worker_type == "pusher": + # This is a list of the stream_writers that there can be only one of. Events can be + # sharded, and therefore doesn't belong here. + singular_stream_writers = [ + "account_data", + "presence", + "receipts", + "to_device", + "typing", + ] + + # Worker-type specific sharding config. Now a single worker can fulfill multiple + # roles, check each. 
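+    # (For instance, a worker declared as 'pusher_and_reader=pusher+client_reader'
+    # reaches this point with worker_types_set == {"pusher", "client_reader"}: it is
+    # appended to pusher_instances, but adds nothing to stream_writers.)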
+ if "pusher" in worker_types_set: shared_config.setdefault("pusher_instances", []).append(worker_name) - elif worker_type == "federation_sender": + if "federation_sender" in worker_types_set: shared_config.setdefault("federation_sender_instances", []).append(worker_name) - elif worker_type == "event_persister": + if "event_persister" in worker_types_set: # Event persisters write to the events stream, so we need to update # the list of event stream writers shared_config.setdefault("stream_writers", {}).setdefault("events", []).append( @@ -360,19 +405,154 @@ def add_worker_roles_to_shared_config( "port": worker_port, } - elif worker_type in ["account_data", "presence", "receipts", "to_device", "typing"]: - # Update the list of stream writers - # It's convenient that the name of the worker type is the same as the stream to write - shared_config.setdefault("stream_writers", {}).setdefault( - worker_type, [] - ).append(worker_name) + # Update the list of stream writers. It's convenient that the name of the worker + # type is the same as the stream to write. Iterate over the whole list in case there + # is more than one. + for worker in worker_types_set: + if worker in singular_stream_writers: + shared_config.setdefault("stream_writers", {}).setdefault( + worker, [] + ).append(worker_name) + + # Map of stream writer instance names to host/ports combos + # For now, all stream writers need http replication ports + instance_map[worker_name] = { + "host": "localhost", + "port": worker_port, + } + + +def merge_worker_template_configs( + existing_dict: Optional[Dict[str, Any]], + to_be_merged_dict: Dict[str, Any], +) -> Dict[str, Any]: + """When given an existing dict of worker template configuration consisting with both + dicts and lists, merge new template data from WORKERS_CONFIG(or create) and + return new dict. + + Args: + existing_dict: Either an existing worker template or a fresh blank one. + to_be_merged_dict: The template from WORKERS_CONFIGS to be merged into + existing_dict. + Returns: The newly merged together dict values. + """ + new_dict: Dict[str, Any] = {} + if not existing_dict: + # It doesn't exist yet, just use the new dict(but take a copy not a reference) + new_dict = to_be_merged_dict.copy() + else: + for i in to_be_merged_dict.keys(): + if (i == "endpoint_patterns") or (i == "listener_resources"): + # merge the two lists, remove duplicates + new_dict[i] = list(set(existing_dict[i] + to_be_merged_dict[i])) + elif i == "shared_extra_conf": + # merge dictionary's, the worker name will be replaced later + new_dict[i] = {**existing_dict[i], **to_be_merged_dict[i]} + elif i == "worker_extra_conf": + # There is only one worker type that has a 'worker_extra_conf' and it is + # the media_repo. Since duplicate worker types on the same worker don't + # work, this is fine. + new_dict[i] = existing_dict[i] + to_be_merged_dict[i] + else: + # Everything else should be identical, like "app", which only works + # because all apps are now generic_workers. + new_dict[i] = to_be_merged_dict[i] + return new_dict - # Map of stream writer instance names to host/ports combos - # For now, all stream writers need http replication ports - instance_map[worker_name] = { - "host": "localhost", - "port": worker_port, - } + +def insert_worker_name_for_worker_config( + existing_dict: Dict[str, Any], worker_name: str +) -> Dict[str, Any]: + """Insert a given worker name into the worker's configuration dict. + + Args: + existing_dict: The worker_config dict that is imported into shared_config. 
+ worker_name: The name of the worker to insert. + Returns: Copy of the dict with newly inserted worker name + """ + dict_to_edit = existing_dict.copy() + for k, v in dict_to_edit["shared_extra_conf"].items(): + # Only proceed if it's the placeholder name string + if v == WORKER_PLACEHOLDER_NAME: + dict_to_edit["shared_extra_conf"][k] = worker_name + return dict_to_edit + + +def apply_requested_multiplier_for_worker(worker_types: List[str]) -> List[str]: + """ + Apply multiplier(if found) by returning a new expanded list with some basic error + checking. + + Args: + worker_types: The unprocessed List of requested workers + Returns: + A new list with all requested workers expanded. + """ + # Checking performed: + # 1. if worker:2 or more is declared, it will create additional workers up to number + # 2. if worker:1, it will create a single copy of this worker as if no number was + # given + # 3. if worker:0 is declared, this worker will be ignored. This is to allow for + # scripting and automated expansion and is intended behaviour. + # 4. if worker:NaN or is a negative number, it will error and log it. + new_worker_types = [] + for worker_type in worker_types: + if ":" in worker_type: + worker_type_components = split_and_strip_string(worker_type, ":", 1) + worker_count = 0 + # Should only be 2 components, a type of worker(s) and an integer as a + # string. Cast the number as an int then it can be used as a counter. + try: + worker_count = int(worker_type_components[1]) + except ValueError: + error( + f"Bad number in worker count for '{worker_type}': " + f"'{worker_type_components[1]}' is not an integer" + ) + + # As long as there are more than 0, we add one to the list to make below. + for _ in range(worker_count): + new_worker_types.append(worker_type_components[0]) + + else: + # If it's not a real worker_type, it will error out later. + new_worker_types.append(worker_type) + return new_worker_types + + +def is_sharding_allowed_for_worker_type(worker_type: str) -> bool: + """Helper to check to make sure worker types that cannot have multiples do not. + + Args: + worker_type: The type of worker to check against. + Returns: True if allowed, False if not + """ + return worker_type not in [ + "background_worker", + "account_data", + "presence", + "receipts", + "typing", + "to_device", + ] + + +def split_and_strip_string( + given_string: str, split_char: str, max_split: SupportsIndex = -1 +) -> List[str]: + """ + Helper to split a string on split_char and strip whitespace from each end of each + element. + Args: + given_string: The string to split + split_char: The character to split the string on + max_split: kwarg for split() to limit how many times the split() happens + Returns: + A List of strings + """ + # Removes whitespace from ends of result strings before adding to list. Allow for + # overriding 'maxsplit' kwarg, default being -1 to signify no maximum. + return [x.strip() for x in given_string.split(split_char, maxsplit=max_split)] def generate_base_homeserver_config() -> None: @@ -387,29 +567,153 @@ def generate_base_homeserver_config() -> None: subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True) +def parse_worker_types( + requested_worker_types: List[str], +) -> Dict[str, Set[str]]: + """Read the desired list of requested workers and prepare the data for use in + generating worker config files while also checking for potential gotchas. 
+ + Args: + requested_worker_types: The list formed from the split environment variable + containing the unprocessed requests for workers. + + Returns: A dict of worker names to set of worker types. Format: + {'worker_name': + {'worker_type', 'worker_type2'} + } + """ + # A counter of worker_base_name -> int. Used for determining the name for a given + # worker when generating its config file, as each worker's name is just + # worker_base_name followed by instance number + worker_base_name_counter: Dict[str, int] = defaultdict(int) + + # Similar to above, but more finely grained. This is used to determine we don't have + # more than a single worker for cases where multiples would be bad(e.g. presence). + worker_type_shard_counter: Dict[str, int] = defaultdict(int) + + # The final result of all this processing + dict_to_return: Dict[str, Set[str]] = {} + + # Handle any multipliers requested for given workers. + multiple_processed_worker_types = apply_requested_multiplier_for_worker( + requested_worker_types + ) + + # Process each worker_type_string + # Examples of expected formats: + # - requested_name=type1+type2+type3 + # - synchrotron + # - event_creator+event_persister + for worker_type_string in multiple_processed_worker_types: + # First, if a name is requested, use that — otherwise generate one. + worker_base_name: str = "" + if "=" in worker_type_string: + # Split on "=", remove extra whitespace from ends then make list + worker_type_split = split_and_strip_string(worker_type_string, "=") + if len(worker_type_split) > 2: + error( + "There should only be one '=' in the worker type string. " + f"Please fix: {worker_type_string}" + ) + + # Assign the name + worker_base_name = worker_type_split[0] + + if not re.match(r"^[a-zA-Z0-9_+-]*[a-zA-Z_+-]$", worker_base_name): + # Apply a fairly narrow regex to the worker names. Some characters + # aren't safe for use in file paths or nginx configurations. + # Don't allow to end with a number because we'll add a number + # ourselves in a moment. + error( + "Invalid worker name; please choose a name consisting of " + "alphanumeric letters, _ + -, but not ending with a digit: " + f"{worker_base_name!r}" + ) + + # Continue processing the remainder of the worker_type string + # with the name override removed. + worker_type_string = worker_type_split[1] + + # Split the worker_type_string on "+", remove whitespace from ends then make + # the list a set so it's deduplicated. + worker_types_set: Set[str] = set( + split_and_strip_string(worker_type_string, "+") + ) + + if not worker_base_name: + # No base name specified: generate one deterministically from set of + # types + worker_base_name = "+".join(sorted(worker_types_set)) + + # At this point, we have: + # worker_base_name which is the name for the worker, without counter. + # worker_types_set which is the set of worker types for this worker. + + # Validate worker_type and make sure we don't allow sharding for a worker type + # that doesn't support it. Will error and stop if it is a problem, + # e.g. 'background_worker'. + for worker_type in worker_types_set: + # Verify this is a real defined worker type. If it's not, stop everything so + # it can be fixed. + if worker_type not in WORKERS_CONFIG: + error( + f"{worker_type} is an unknown worker type! Was found in " + f"'{worker_type_string}'. Please fix!" + ) + + if worker_type in worker_type_shard_counter: + if not is_sharding_allowed_for_worker_type(worker_type): + error( + f"There can be only a single worker with {worker_type} " + "type. 
Please recount and remove." + ) + # Not in shard counter, must not have seen it yet, add it. + worker_type_shard_counter[worker_type] += 1 + + # Generate the number for the worker using incrementing counter + worker_base_name_counter[worker_base_name] += 1 + worker_number = worker_base_name_counter[worker_base_name] + worker_name = f"{worker_base_name}{worker_number}" + + if worker_number > 1: + # If this isn't the first worker, check that we don't have a confusing + # mixture of worker types with the same base name. + first_worker_with_base_name = dict_to_return[f"{worker_base_name}1"] + if first_worker_with_base_name != worker_types_set: + error( + f"Can not use worker_name: '{worker_name}' for worker_type(s): " + f"{worker_types_set!r}. It is already in use by " + f"worker_type(s): {first_worker_with_base_name!r}" + ) + + dict_to_return[worker_name] = worker_types_set + + return dict_to_return + + def generate_worker_files( - environ: Mapping[str, str], config_path: str, data_dir: str + environ: Mapping[str, str], + config_path: str, + data_dir: str, + requested_worker_types: Dict[str, Set[str]], ) -> None: - """Read the desired list of workers from environment variables and generate - shared homeserver, nginx and supervisord configs. + """Read the desired workers(if any) that is passed in and generate shared + homeserver, nginx and supervisord configs. Args: environ: os.environ instance. config_path: The location of the generated Synapse main worker config file. data_dir: The location of the synapse data directory. Where log and user-facing config files live. + requested_worker_types: A Dict containing requested workers in the format of + {'worker_name1': {'worker_type', ...}} """ # Note that yaml cares about indentation, so care should be taken to insert lines # into files at the correct indentation below. - # shared_config is the contents of a Synapse config file that will be shared amongst - # the main Synapse process as well as all workers. - # It is intended mainly for disabling functionality when certain workers are spun up, - # and adding a replication listener. - - # First read the original config file and extract the listeners block. Then we'll add - # another listener for replication. Later we'll write out the result to the shared - # config file. + # First read the original config file and extract the listeners block. Then we'll + # add another listener for replication. Later we'll write out the result to the + # shared config file. listeners = [ { "port": 9093, @@ -425,9 +729,9 @@ def generate_worker_files( listeners += original_listeners # The shared homeserver config. The contents of which will be inserted into the - # base shared worker jinja2 template. - # - # This config file will be passed to all workers, included Synapse's main process. + # base shared worker jinja2 template. This config file will be passed to all + # workers, included Synapse's main process. It is intended mainly for disabling + # functionality when certain workers are spun up, and adding a replication listener. shared_config: Dict[str, Any] = {"listeners": listeners} # List of dicts that describe workers. @@ -435,31 +739,20 @@ def generate_worker_files( # program blocks. worker_descriptors: List[Dict[str, Any]] = [] - # Upstreams for load-balancing purposes. This dict takes the form of a worker type to the - # ports of each worker. For example: + # Upstreams for load-balancing purposes. This dict takes the form of the worker + # type to the ports of each worker. 
For example: # { # worker_type: {1234, 1235, ...}} # } # and will be used to construct 'upstream' nginx directives. nginx_upstreams: Dict[str, Set[int]] = {} - # A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what will be - # placed after the proxy_pass directive. The main benefit to representing this data as a - # dict over a str is that we can easily deduplicate endpoints across multiple instances - # of the same worker. - # - # An nginx site config that will be amended to depending on the workers that are - # spun up. To be placed in /etc/nginx/conf.d. - nginx_locations = {} - - # Read the desired worker configuration from the environment - worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip() - if not worker_types_env: - # No workers, just the main process - worker_types = [] - else: - # Split type names by comma, ignoring whitespace. - worker_types = [x.strip() for x in worker_types_env.split(",")] + # A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what + # will be placed after the proxy_pass directive. The main benefit to representing + # this data as a dict over a str is that we can easily deduplicate endpoints + # across multiple instances of the same worker. The final rendering will be combined + # with nginx_upstreams and placed in /etc/nginx/conf.d. + nginx_locations: Dict[str, str] = {} # Create the worker configuration directory if it doesn't already exist os.makedirs("/conf/workers", exist_ok=True) @@ -467,66 +760,57 @@ def generate_worker_files( # Start worker ports from this arbitrary port worker_port = 18009 - # A counter of worker_type -> int. Used for determining the name for a given - # worker type when generating its config file, as each worker's name is just - # worker_type + instance # - worker_type_counter: Dict[str, int] = {} - # A list of internal endpoints to healthcheck, starting with the main process # which exists even if no workers do. healthcheck_urls = ["http://localhost:8080/health"] - # For each worker type specified by the user, create config values - for worker_type in worker_types: - worker_config = WORKERS_CONFIG.get(worker_type) - if worker_config: - worker_config = worker_config.copy() - else: - error(worker_type + " is an unknown worker type! Please fix!") - - new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1 - worker_type_counter[worker_type] = new_worker_count + # Get the set of all worker types that we have configured + all_worker_types_in_use = set(chain(*requested_worker_types.values())) + # Map locations to upstreams (corresponding to worker types) in Nginx + # but only if we use the appropriate worker type + for worker_type in all_worker_types_in_use: + for endpoint_pattern in WORKERS_CONFIG[worker_type]["endpoint_patterns"]: + nginx_locations[endpoint_pattern] = f"http://{worker_type}" + + # For each worker type specified by the user, create config values and write it's + # yaml config file + for worker_name, worker_types_set in requested_worker_types.items(): + # The collected and processed data will live here. + worker_config: Dict[str, Any] = {} + + # Merge all worker config templates for this worker into a single config + for worker_type in worker_types_set: + copy_of_template_config = WORKERS_CONFIG[worker_type].copy() + + # Merge worker type template configuration data. It's a combination of lists + # and dicts, so use this helper. 
+ worker_config = merge_worker_template_configs( + worker_config, copy_of_template_config + ) + + # Replace placeholder names in the config template with the actual worker name. + worker_config = insert_worker_name_for_worker_config(worker_config, worker_name) - # Name workers by their type concatenated with an incrementing number - # e.g. federation_reader1 - worker_name = worker_type + str(new_worker_count) worker_config.update( {"name": worker_name, "port": str(worker_port), "config_path": config_path} ) - # Update the shared config with any worker-type specific options - shared_config.update(worker_config["shared_extra_conf"]) + # Update the shared config with any worker_type specific options. The first of a + # given worker_type needs to stay assigned and not be replaced. + worker_config["shared_extra_conf"].update(shared_config) + shared_config = worker_config["shared_extra_conf"] healthcheck_urls.append("http://localhost:%d/health" % (worker_port,)) - # Check if more than one instance of this worker type has been specified - worker_type_total_count = worker_types.count(worker_type) - # Update the shared config with sharding-related options if necessary add_worker_roles_to_shared_config( - shared_config, worker_type, worker_name, worker_port + shared_config, worker_types_set, worker_name, worker_port ) # Enable the worker in supervisord worker_descriptors.append(worker_config) - # Add nginx location blocks for this worker's endpoints (if any are defined) - for pattern in worker_config["endpoint_patterns"]: - # Determine whether we need to load-balance this worker - if worker_type_total_count > 1: - # Create or add to a load-balanced upstream for this worker - nginx_upstreams.setdefault(worker_type, set()).add(worker_port) - - # Upstreams are named after the worker_type - upstream = "http://" + worker_type - else: - upstream = "http://localhost:%d" % (worker_port,) - - # Note that this endpoint should proxy to this upstream - nginx_locations[pattern] = upstream - # Write out the worker's logging config file - log_config_filepath = generate_worker_log_config(environ, worker_name, data_dir) # Then a worker config file @@ -537,6 +821,10 @@ def generate_worker_files( worker_log_config_filepath=log_config_filepath, ) + # Save this worker's port number to the correct nginx upstreams + for worker_type in worker_types_set: + nginx_upstreams.setdefault(worker_type, set()).add(worker_port) + worker_port += 1 # Build the nginx location config blocks @@ -549,15 +837,14 @@ def generate_worker_files( # Determine the load-balancing upstreams to configure nginx_upstream_config = "" - - for upstream_worker_type, upstream_worker_ports in nginx_upstreams.items(): + for upstream_worker_base_name, upstream_worker_ports in nginx_upstreams.items(): body = "" for port in upstream_worker_ports: - body += " server localhost:%d;\n" % (port,) + body += f" server localhost:{port};\n" # Add to the list of configured upstreams nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format( - upstream_worker_type=upstream_worker_type, + upstream_worker_base_name=upstream_worker_base_name, body=body, ) @@ -578,7 +865,7 @@ def generate_worker_files( if reg_path.suffix.lower() in (".yaml", ".yml") ] - workers_in_use = len(worker_types) > 0 + workers_in_use = len(requested_worker_types) > 0 # Shared homeserver config convert( @@ -674,17 +961,34 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None: if not os.path.exists(config_path): log("Generating base homeserver config") 
generate_base_homeserver_config() - - # This script may be run multiple times (mostly by Complement, see note at top of file). - # Don't re-configure workers in this instance. + else: + log("Base homeserver config exists—not regenerating") + # This script may be run multiple times (mostly by Complement, see note at top of + # file). Don't re-configure workers in this instance. mark_filepath = "/conf/workers_have_been_configured" if not os.path.exists(mark_filepath): + # Collect and validate worker_type requests + # Read the desired worker configuration from the environment + worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip() + # Only process worker_types if they exist + if not worker_types_env: + # No workers, just the main process + worker_types = [] + requested_worker_types: Dict[str, Any] = {} + else: + # Split type names by comma, ignoring whitespace. + worker_types = split_and_strip_string(worker_types_env, ",") + requested_worker_types = parse_worker_types(worker_types) + # Always regenerate all other config files - generate_worker_files(environ, config_path, data_dir) + log("Generating worker config files") + generate_worker_files(environ, config_path, data_dir, requested_worker_types) # Mark workers as being configured with open(mark_filepath, "w") as f: f.write("") + else: + log("Worker config exists—not regenerating") # Lifted right out of start.py jemallocpath = "/usr/lib/%s-linux-gnu/libjemalloc.so.2" % (platform.machine(),) diff --git a/docs/admin_api/event_reports.md b/docs/admin_api/event_reports.md index beec8bb7efe9..83f7dc37f41a 100644 --- a/docs/admin_api/event_reports.md +++ b/docs/admin_api/event_reports.md @@ -169,3 +169,17 @@ The following fields are returned in the JSON response body: * `canonical_alias`: string - The canonical alias of the room. `null` if the room does not have a canonical alias set. * `event_json`: object - Details of the original event that was reported. + +# Delete a specific event report + +This API deletes a specific event report. If the request is successful, the response body +will be an empty JSON object. + +The api is: +``` +DELETE /_synapse/admin/v1/event_reports/ +``` + +**URL parameters:** + +* `report_id`: string - The ID of the event report. diff --git a/docs/admin_api/media_admin_api.md b/docs/admin_api/media_admin_api.md index 7f8c8e22c1a0..30833f31091b 100644 --- a/docs/admin_api/media_admin_api.md +++ b/docs/admin_api/media_admin_api.md @@ -235,6 +235,14 @@ The following fields are returned in the JSON response body: Request: +``` +POST /_synapse/admin/v1/media/delete?before_ts= + +{} +``` + +*Deprecated in Synapse v1.78.0:* This API is available at the deprecated endpoint: + ``` POST /_synapse/admin/v1/media//delete?before_ts= @@ -243,7 +251,7 @@ POST /_synapse/admin/v1/media//delete?before_ts= URL Parameters -* `server_name`: string - The name of your local server (e.g `matrix.org`). +* `server_name`: string - The name of your local server (e.g `matrix.org`). *Deprecated in Synapse v1.78.0.* * `before_ts`: string representing a positive integer - Unix timestamp in milliseconds. Files that were last used before this timestamp will be deleted. It is the timestamp of last access, not the timestamp when the file was created. diff --git a/docs/delegate.md b/docs/delegate.md index ee9cbb3b1cfc..aee82fcb9ab3 100644 --- a/docs/delegate.md +++ b/docs/delegate.md @@ -73,6 +73,15 @@ It is also possible to do delegation using a SRV DNS record. 
However, that is ge not recommended, as it can be difficult to configure the TLS certificates correctly in this case, and it offers little advantage over `.well-known` delegation. +Please keep in mind that server delegation is a function of server-server communication, +and as such using SRV DNS records will not cover use cases involving client-server comms. +This means setting global client settings (such as a Jitsi endpoint, or disabling +creating new rooms as encrypted by default, etc) will still require that you serve a file +from the `https:///.well-known/` endpoints defined in the spec! If you are +considering using SRV DNS delegation to avoid serving files from this endpoint, consider +the impact that you will not be able to change those client-based default values globally, +and will be relegated to the featureset of the configuration of each individual client. + However, if you really need it, you can find some documentation on what such a record should look like and how Synapse will use it in [the Matrix specification](https://matrix.org/docs/spec/server_server/latest#resolving-server-names). diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index 36bc88468468..925dcd8933e3 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -78,6 +78,19 @@ poetry install --extras all This will install the runtime and developer dependencies for the project. +## Running Synapse via poetry + +To start a local instance of Synapse in the locked poetry environment, create a config file: + +```sh +cp docs/sample_config.yaml homeserver.yaml +``` + +Now edit homeserver.yaml, and run Synapse with: + +```sh +poetry run python -m synapse.app.homeserver -c homeserver.yaml +``` # 5. Get in touch. diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md index 50969edd46ff..1a0c6ec954ca 100644 --- a/docs/modules/spam_checker_callbacks.md +++ b/docs/modules/spam_checker_callbacks.md @@ -307,8 +307,8 @@ _Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_a ```python async def check_media_file_for_spam( - file_wrapper: "synapse.rest.media.v1.media_storage.ReadableFileWrapper", - file_info: "synapse.rest.media.v1._base.FileInfo", + file_wrapper: "synapse.media.media_storage.ReadableFileWrapper", + file_info: "synapse.media._base.FileInfo", ) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] ``` diff --git a/docs/modules/third_party_rules_callbacks.md b/docs/modules/third_party_rules_callbacks.md index e1a5b6524fb4..4a27d976fb13 100644 --- a/docs/modules/third_party_rules_callbacks.md +++ b/docs/modules/third_party_rules_callbacks.md @@ -146,6 +146,9 @@ Note that this callback is called when the event has already been processed and into the room, which means this callback cannot be used to deny persisting the event. To deny an incoming event, see [`check_event_for_spam`](spam_checker_callbacks.md#check_event_for_spam) instead. +For any given event, this callback will be called on every worker process, even if that worker will not end up +acting on that event. This callback will not be called for events that are marked as rejected. + If multiple modules implement this callback, Synapse runs them all in order. ### `check_can_shutdown_room` @@ -251,6 +254,11 @@ If multiple modules implement this callback, Synapse runs them all in order. 
_First introduced in Synapse v1.56.0_ +** +This callback is deprecated in favour of the `on_add_user_third_party_identifier` callback, which +features the same functionality. The only difference is in name. +** + ```python async def on_threepid_bind(user_id: str, medium: str, address: str) -> None: ``` @@ -265,6 +273,44 @@ server_. If multiple modules implement this callback, Synapse runs them all in order. +### `on_add_user_third_party_identifier` + +_First introduced in Synapse v1.79.0_ + +```python +async def on_add_user_third_party_identifier(user_id: str, medium: str, address: str) -> None: +``` + +Called after successfully creating an association between a user and a third-party identifier +(email address, phone number). The module is given the Matrix ID of the user the +association is for, as well as the medium (`email` or `msisdn`) and address of the +third-party identifier (i.e. an email address). + +Note that this callback is _not_ called if a user attempts to bind their third-party identifier +to an identity server (via a call to [`POST +/_matrix/client/v3/account/3pid/bind`](https://spec.matrix.org/v1.5/client-server-api/#post_matrixclientv3account3pidbind)). + +If multiple modules implement this callback, Synapse runs them all in order. + +### `on_remove_user_third_party_identifier` + +_First introduced in Synapse v1.79.0_ + +```python +async def on_remove_user_third_party_identifier(user_id: str, medium: str, address: str) -> None: +``` + +Called after successfully removing an association between a user and a third-party identifier +(email address, phone number). The module is given the Matrix ID of the user the +association is for, as well as the medium (`email` or `msisdn`) and address of the +third-party identifier (i.e. an email address). + +Note that this callback is _not_ called if a user attempts to unbind their third-party +identifier from an identity server (via a call to [`POST +/_matrix/client/v3/account/3pid/unbind`](https://spec.matrix.org/v1.5/client-server-api/#post_matrixclientv3account3pidunbind)). + +If multiple modules implement this callback, Synapse runs them all in order. + ## Example The example below is a module that implements the third-party rules callback @@ -297,4 +343,4 @@ class EventCensorer: ) event_dict["content"] = new_event_content return event_dict -``` +``` \ No newline at end of file diff --git a/docs/modules/writing_a_module.md b/docs/modules/writing_a_module.md index 30de69a53387..b99f64b9d8e8 100644 --- a/docs/modules/writing_a_module.md +++ b/docs/modules/writing_a_module.md @@ -83,3 +83,59 @@ the callback name as the argument name and the function as its value. A Callbacks for each category can be found on their respective page of the [Synapse documentation website](https://matrix-org.github.io/synapse). + +## Caching + +_Added in Synapse 1.74.0._ + +Modules can leverage Synapse's caching tools to manage their own cached functions. This +can be helpful for modules that need to repeatedly request the same data from the database +or a remote service. + +Functions that need to be wrapped with a cache need to be decorated with a `@cached()` +decorator (which can be imported from `synapse.module_api`) and registered with the +[`ModuleApi.register_cached_function`](https://github.com/matrix-org/synapse/blob/release-v1.77/synapse/module_api/__init__.py#L888) +API when initialising the module. 
If the module needs to invalidate an entry in a cache, +it needs to use the [`ModuleApi.invalidate_cache`](https://github.com/matrix-org/synapse/blob/release-v1.77/synapse/module_api/__init__.py#L904) +API, with the function to invalidate the cache of and the key(s) of the entry to +invalidate. + +Below is an example of a simple module using a cached function: + +```python +from typing import Any +from synapse.module_api import cached, ModuleApi + +class MyModule: + def __init__(self, config: Any, api: ModuleApi): + self.api = api + + # Register the cached function so Synapse knows how to correctly invalidate + # entries for it. + self.api.register_cached_function(self.get_user_from_id) + + @cached() + async def get_department_for_user(self, user_id: str) -> str: + """A function with a cache.""" + # Request a department from an external service. + return await self.http_client.get_json( + "https://int.example.com/users", {"user_id": user_id) + )["department"] + + async def do_something_with_users(self) -> None: + """Calls the cached function and then invalidates an entry in its cache.""" + + user_id = "@alice:example.com" + + # Get the user. Since get_department_for_user is wrapped with a cache, + # the return value for this user_id will be cached. + department = await self.get_department_for_user(user_id) + + # Do something with `department`... + + # Let's say something has changed with our user, and the entry we have for + # them in the cache is out of date, so we want to invalidate it. + await self.api.invalidate_cache(self.get_department_for_user, (user_id,)) +``` + +See the [`cached` docstring](https://github.com/matrix-org/synapse/blob/release-v1.77/synapse/module_api/__init__.py#L190) for more details. diff --git a/docs/openid.md b/docs/openid.md index 6ee8c83ec0a2..73f1e061210c 100644 --- a/docs/openid.md +++ b/docs/openid.md @@ -590,6 +590,47 @@ oidc_providers: Note that the fields `client_id` and `client_secret` are taken from the CURL response above. +### Shibboleth with OIDC Plugin + +[Shibboleth](https://www.shibboleth.net/) is an open Standard IdP solution widely used by Universities. + +1. Shibboleth needs the [OIDC Plugin](https://shibboleth.atlassian.net/wiki/spaces/IDPPLUGINS/pages/1376878976/OIDC+OP) installed and working correctly. +2. Create a new config on the IdP Side, ensure that the `client_id` and `client_secret` + are randomly generated data. +```json +{ + "client_id": "SOME-CLIENT-ID", + "client_secret": "SOME-SUPER-SECRET-SECRET", + "response_types": ["code"], + "grant_types": ["authorization_code"], + "scope": "openid profile email", + "redirect_uris": ["https://[synapse public baseurl]/_synapse/client/oidc/callback"] +} +``` + +Synapse config: + +```yaml +oidc_providers: + # Shibboleth IDP + # + - idp_id: shibboleth + idp_name: "Shibboleth Login" + discover: true + issuer: "https://YOUR-IDP-URL.TLD" + client_id: "YOUR_CLIENT_ID" + client_secret: "YOUR-CLIENT-SECRECT-FROM-YOUR-IDP" + scopes: ["openid", "profile", "email"] + allow_existing_users: true + user_profile_method: "userinfo_endpoint" + user_mapping_provider: + config: + subject_claim: "sub" + localpart_template: "{{ user.sub.split('@')[0] }}" + display_name_template: "{{ user.name }}" + email_template: "{{ user.email }}" +``` + ### Twitch 1. 
Setup a developer account on [Twitch](https://dev.twitch.tv/) diff --git a/docs/setup/installation.md b/docs/setup/installation.md index d123e339edc9..86e506a3e231 100644 --- a/docs/setup/installation.md +++ b/docs/setup/installation.md @@ -26,8 +26,8 @@ for most users. #### Docker images and Ansible playbooks There is an official synapse image available at - which can be used with -the docker-compose file available at + or at [`ghcr.io/matrix-org/synapse`](https://ghcr.io/matrix-org/synapse) +which can be used with the docker-compose file available at [contrib/docker](https://github.com/matrix-org/synapse/tree/develop/contrib/docker). Further information on this including configuration options is available in the README on hub.docker.com. diff --git a/docs/upgrade.md b/docs/upgrade.md index bc143444bed6..f14444a400ea 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -88,6 +88,51 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.80.0 + +## Reporting events error code change + +Before this update, the +[`POST /_matrix/client/v3/rooms/{roomId}/report/{eventId}`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3roomsroomidreporteventid) +endpoint would return a `403` if a user attempted to report an event that they did not have access to. +This endpoint will now return a `404` in this case instead. + +Clients that implement event reporting should check that their error handling code will handle this +change. + +# Upgrading to v1.79.0 + +## The `on_threepid_bind` module callback method has been deprecated + +Synapse v1.79.0 deprecates the +[`on_threepid_bind`](modules/third_party_rules_callbacks.md#on_threepid_bind) +"third-party rules" Synapse module callback method in favour of a new module method, +[`on_add_user_third_party_identifier`](modules/third_party_rules_callbacks.md#on_add_user_third_party_identifier). +`on_threepid_bind` will be removed in a future version of Synapse. You should check whether any Synapse +modules in use in your deployment are making use of `on_threepid_bind`, and update them where possible. + +The arguments and functionality of the new method are the same. + +The justification behind the name change is that the old method's name, `on_threepid_bind`, was +misleading. A user is considered to "bind" their third-party ID to their Matrix ID only if they +do so via an [identity server](https://spec.matrix.org/latest/identity-service-api/) +(so that users on other homeservers may find them). But this method was not called in that case - +it was only called when a user added a third-party identifier on the local homeserver. + +Module developers may also be interested in the related +[`on_remove_user_third_party_identifier`](modules/third_party_rules_callbacks.md#on_remove_user_third_party_identifier) +module callback method that was also added in Synapse v1.79.0. This new method is called when a +user removes a third-party identifier from their account. + +# Upgrading to v1.78.0 + +## Deprecate the `/_synapse/admin/v1/media//delete` admin API + +Synapse 1.78.0 replaces the `/_synapse/admin/v1/media//delete` +admin API with an identical endpoint at `/_synapse/admin/v1/media/delete`. Please +update your tooling to use the new endpoint. The deprecated version will be removed +in a future release. + # Upgrading to v1.76.0 ## Faster joins are enabled by default @@ -137,6 +182,7 @@ and then do `pip install matrix-synapse[user-search]` for a PyPI install. 
Docker images and Debian packages need nothing specific as they already include or specify ICU as an explicit dependency. + # Upgrading to v1.73.0 ## Legacy Prometheus metric names have now been removed diff --git a/docs/usage/administration/admin_faq.md b/docs/usage/administration/admin_faq.md index 7a2774119964..28c3dd53a5f4 100644 --- a/docs/usage/administration/admin_faq.md +++ b/docs/usage/administration/admin_faq.md @@ -70,10 +70,55 @@ output-directory │ ├───state │ ├───invite_state │ └───knock_state -└───user_data - ├───connections - ├───devices - └───profile +├───user_data +│ ├───account_data +│ │ ├───global +│ │ └─── +│ ├───connections +│ ├───devices +│ └───profile +└───media_ids + └─── +``` + +The `media_ids` folder contains only the metadata of the media uploaded by the user. +It does not contain the media itself. +Furthermore, only the `media_ids` that Synapse manages itself are exported. +If another media repository (e.g. [matrix-media-repo](https://github.com/turt2live/matrix-media-repo)) +is used, the data must be exported separately. + +With the `media_ids` the media files can be downloaded. +Media that have been sent in encrypted rooms are only retrieved in encrypted form. +The following script can help with download the media files: + +```bash +#!/usr/bin/env bash + +# Parameters +# +# source_directory: Directory which contains the export with the media_ids. +# target_directory: Directory into which all files are to be downloaded. +# repository_url: Address of the media repository resp. media worker. +# serverName: Name of the server (`server_name` from homeserver.yaml). +# +# Example: +# ./download_media.sh /tmp/export_data/media_ids/ /tmp/export_data/media_files/ http://localhost:8008 matrix.example.com + +source_directory=$1 +target_directory=$2 +repository_url=$3 +serverName=$4 + +mkdir -p $target_directory + +for file in $source_directory/*; do + filename=$(basename ${file}) + url=$repository_url/_matrix/media/v3/download/$serverName/$filename + echo "Downloading $filename - $url" + if ! wget -o /dev/null -P $target_directory $url; then + echo "Could not download $filename" + fi +done ``` Manually resetting passwords @@ -84,7 +129,7 @@ can reset a user's password using the [admin API](../../admin_api/user_admin_api I have a problem with my server. Can I just delete my database and start again? --- -Deleting your database is unlikely to make anything better. +Deleting your database is unlikely to make anything better. It's easy to make the mistake of thinking that you can start again from a clean slate by dropping your database, but things don't work like that in a federated @@ -99,7 +144,7 @@ Come and seek help in https://matrix.to/#/#synapse:matrix.org. There are two exceptions when it might be sensible to delete your database and start again: * You have *never* joined any rooms which are federated with other servers. For -instance, a local deployment which the outside world can't talk to. +instance, a local deployment which the outside world can't talk to. * You are changing the `server_name` in the homeserver configuration. In effect this makes your server a completely new one from the point of view of the network, so in this case it makes sense to start with a clean database. 
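As a concrete companion to the password-reset note above, the following sketch calls the reset-password admin API from Python. It is illustrative only: the homeserver URL, admin access token, user ID and new password are placeholders to replace with your own values.

```python
import requests

# Placeholder values - substitute your own homeserver URL, admin access token,
# the user's full Matrix ID and the desired new password.
HOMESERVER = "https://matrix.example.com"
ADMIN_TOKEN = "<admin access token>"
USER_ID = "@alice:example.com"

response = requests.post(
    f"{HOMESERVER}/_synapse/admin/v1/reset_password/{USER_ID}",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    json={"new_password": "a-new-strong-password", "logout_devices": True},
)
response.raise_for_status()
```

`logout_devices` controls whether the user's other sessions are logged out as part of the reset.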
@@ -112,7 +157,7 @@ Using the following curl command: curl -H 'Authorization: Bearer ' -X DELETE https://matrix.org/_matrix/client/r0/directory/room/ ``` `` - can be obtained in riot by looking in the riot settings, down the bottom is: -Access Token:\ +Access Token:\ `` - the room alias, eg. #my_room:matrix.org this possibly needs to be URL encoded also, for example %23my_room%3Amatrix.org @@ -149,13 +194,13 @@ What are the biggest rooms on my server? --- ```sql -SELECT s.canonical_alias, g.room_id, count(*) AS num_rows -FROM - state_groups_state AS g, - room_stats_state AS s -WHERE g.room_id = s.room_id +SELECT s.canonical_alias, g.room_id, count(*) AS num_rows +FROM + state_groups_state AS g, + room_stats_state AS s +WHERE g.room_id = s.room_id GROUP BY s.canonical_alias, g.room_id -ORDER BY num_rows desc +ORDER BY num_rows desc LIMIT 10; ``` diff --git a/docs/usage/administration/database_maintenance_tools.md b/docs/usage/administration/database_maintenance_tools.md index 92b805d413cb..e19380db07be 100644 --- a/docs/usage/administration/database_maintenance_tools.md +++ b/docs/usage/administration/database_maintenance_tools.md @@ -1,4 +1,4 @@ -This blog post by Victor Berger explains how to use many of the tools listed on this page: https://levans.fr/shrink-synapse-database.html +_This [blog post by Jackson Chen](https://jacksonchen666.com/posts/2022-12-03/14-33-00/) (Dec 2022) explains how to use many of the tools listed on this page. There is also an [earlier blog by Victor Berger](https://levans.fr/shrink-synapse-database.html) (June 2020), though this may be outdated in places._ # List of useful tools and scripts for maintenance Synapse database: @@ -15,4 +15,4 @@ The purge history API allows server admins to purge historic events from their d Tool for compressing (deduplicating) `state_groups_state` table. ## [SQL for analyzing Synapse PostgreSQL database stats](useful_sql_for_admins.md) -Some easy SQL that reports useful stats about your Synapse database. \ No newline at end of file +Some easy SQL that reports useful stats about your Synapse database. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 2883f76a26cd..015855ee7ef4 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1105,7 +1105,7 @@ This setting should only be used in very specific cases, such as federation over Tor hidden services and similar. For private networks of homeservers, you likely want to use a private CA instead. -Only effective if `federation_verify_certicates` is `true`. +Only effective if `federation_verify_certificates` is `true`. Example configuration: ```yaml @@ -1518,11 +1518,11 @@ rc_registration_token_validity: This option specifies several limits for login: * `address` ratelimits login requests based on the client's IP - address. Defaults to `per_second: 0.17`, `burst_count: 3`. + address. Defaults to `per_second: 0.003`, `burst_count: 5`. * `account` ratelimits login requests based on the account the - client is attempting to log into. Defaults to `per_second: 0.17`, - `burst_count: 3`. + client is attempting to log into. Defaults to `per_second: 0.03`, + `burst_count: 5`. * `failed_attempts` ratelimits login requests based on the account the client is attempting to log into, based on the amount of failed login @@ -2227,12 +2227,12 @@ allows the shared secret to be specified in an external file. 
The file should be a plain text file, containing only the shared secret. -If this file does not exist, Synapse will create a new signing -key on startup and store it in this file. +If this file does not exist, Synapse will create a new shared +secret on startup and store it in this file. Example configuration: ```yaml -registration_shared_secret_file: /path/to/secrets/file +registration_shared_secret_path: /path/to/secrets/file ``` _Added in Synapse 1.67.0._ @@ -3927,6 +3927,9 @@ This setting has the following sub-options: * `host` and `port`: Optional host and port to use to connect to redis. Defaults to localhost and 6379 * `password`: Optional password if configured on the Redis instance. +* `dbid`: Optional redis dbid if needs to connect to specific redis logical db. + + _Added in Synapse 1.78.0._ Example configuration: ```yaml @@ -3935,6 +3938,7 @@ redis: host: localhost port: 6379 password: + dbid: ``` --- ## Individual worker configuration diff --git a/docs/workers.md b/docs/workers.md index bc66f0e1bce5..bf7690f5aff2 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -160,7 +160,18 @@ recommend the use of `systemd` where available: for information on setting up [Systemd with Workers](systemd-with-workers/). To use `synctl`, see [Using synctl with Workers](synctl_workers.md). +## Start Synapse with Poetry +The following applies to Synapse installations that have been installed from source using `poetry`. + +You can start the main Synapse process with Poetry by running the following command: +```console +poetry run synapse_homeserver -c [your homeserver.yaml] +``` +For worker setups, you can run the following command +```console +poetry run synapse_worker -c [your worker.yaml] +``` ## Available worker applications ### `synapse.app.generic_worker` @@ -220,7 +231,9 @@ information. ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event/ ^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$ ^/_matrix/client/v1/rooms/.*/timestamp_to_event$ + ^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases ^/_matrix/client/(api/v1|r0|v3|unstable)/search$ + ^/_matrix/client/(r0|v3|unstable)/user/.*/filter(/|$) # Encryption requests ^/_matrix/client/(r0|v3|unstable)/keys/query$ @@ -232,6 +245,7 @@ information. # Registration/login requests ^/_matrix/client/(api/v1|r0|v3|unstable)/login$ ^/_matrix/client/(r0|v3|unstable)/register$ + ^/_matrix/client/(r0|v3|unstable)/register/available$ ^/_matrix/client/v1/register/m.login.registration_token/validity$ # Event sending requests @@ -240,6 +254,7 @@ information. 
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state/ ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$ ^/_matrix/client/(api/v1|r0|v3|unstable)/join/ + ^/_matrix/client/(api/v1|r0|v3|unstable)/knock/ ^/_matrix/client/(api/v1|r0|v3|unstable)/profile/ # Account data requests diff --git a/mypy.ini b/mypy.ini index 0efafb26b6a6..945f7925cb2c 100644 --- a/mypy.ini +++ b/mypy.ini @@ -31,18 +31,11 @@ exclude = (?x) |synapse/storage/databases/__init__.py |synapse/storage/databases/main/cache.py |synapse/storage/schema/ - - |tests/module_api/test_api.py - |tests/rest/media/v1/test_media_storage.py - |tests/server.py )$ [mypy-synapse.federation.transport.client] disallow_untyped_defs = False -[mypy-synapse.http.client] -disallow_untyped_defs = False - [mypy-synapse.http.matrixfederationclient] disallow_untyped_defs = False @@ -55,87 +48,9 @@ warn_unused_ignores = False [mypy-synapse.util.caches.treecache] disallow_untyped_defs = False -[mypy-synapse.server] -disallow_untyped_defs = False - -[mypy-synapse.storage.database] -disallow_untyped_defs = False - -[mypy-tests.*] -disallow_untyped_defs = False - -[mypy-tests.api.*] -disallow_untyped_defs = True - -[mypy-tests.app.*] -disallow_untyped_defs = True - -[mypy-tests.appservice.*] -disallow_untyped_defs = True - -[mypy-tests.config.*] -disallow_untyped_defs = True - -[mypy-tests.crypto.*] -disallow_untyped_defs = True - -[mypy-tests.events.*] -disallow_untyped_defs = True - -[mypy-tests.federation.*] -disallow_untyped_defs = True - -[mypy-tests.handlers.*] -disallow_untyped_defs = True - -[mypy-tests.http.*] -disallow_untyped_defs = True - -[mypy-tests.logging.*] -disallow_untyped_defs = True - -[mypy-tests.metrics.*] -disallow_untyped_defs = True - -[mypy-tests.push.*] -disallow_untyped_defs = True - -[mypy-tests.replication.*] -disallow_untyped_defs = True - -[mypy-tests.rest.*] -disallow_untyped_defs = True - -[mypy-tests.state.test_profile] -disallow_untyped_defs = True - -[mypy-tests.storage.*] -disallow_untyped_defs = True - -[mypy-tests.test_server] -disallow_untyped_defs = True - -[mypy-tests.test_state] -disallow_untyped_defs = True - -[mypy-tests.test_terms_auth] -disallow_untyped_defs = True - -[mypy-tests.types.*] -disallow_untyped_defs = True - -[mypy-tests.util.caches.*] -disallow_untyped_defs = True - [mypy-tests.util.caches.test_descriptors] disallow_untyped_defs = False -[mypy-tests.util.*] -disallow_untyped_defs = True - -[mypy-tests.utils] -disallow_untyped_defs = True - ;; Dependencies without annotations ;; Before ignoring a module, check to see if type stubs are available. ;; The `typeshed` project maintains stubs here: @@ -156,11 +71,6 @@ ignore_missing_imports = True [mypy-msgpack] ignore_missing_imports = True -# Note: WIP stubs available at -# https://github.com/microsoft/python-type-stubs/tree/64934207f523ad6b611e6cfe039d85d7175d7d0d/netaddr -[mypy-netaddr] -ignore_missing_imports = True - [mypy-parameterized.*] ignore_missing_imports = True diff --git a/poetry.lock b/poetry.lock index c6f30530dfc3..eaced98fd3e3 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry and should not be changed by hand. +# This file is automatically @generated by Poetry 1.4.2 and should not be changed by hand. [[package]] name = "attrs" @@ -90,32 +90,46 @@ typecheck = ["mypy"] [[package]] name = "black" -version = "22.12.0" +version = "23.3.0" description = "The uncompromising code formatter." 
category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "black-22.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eedd20838bd5d75b80c9f5487dbcb06836a43833a37846cf1d8c1cc01cef59d"}, - {file = "black-22.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:159a46a4947f73387b4d83e87ea006dbb2337eab6c879620a3ba52699b1f4351"}, - {file = "black-22.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d30b212bffeb1e252b31dd269dfae69dd17e06d92b87ad26e23890f3efea366f"}, - {file = "black-22.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:7412e75863aa5c5411886804678b7d083c7c28421210180d67dfd8cf1221e1f4"}, - {file = "black-22.12.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c116eed0efb9ff870ded8b62fe9f28dd61ef6e9ddd28d83d7d264a38417dcee2"}, - {file = "black-22.12.0-cp37-cp37m-win_amd64.whl", hash = "sha256:1f58cbe16dfe8c12b7434e50ff889fa479072096d79f0a7f25e4ab8e94cd8350"}, - {file = "black-22.12.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77d86c9f3db9b1bf6761244bc0b3572a546f5fe37917a044e02f3166d5aafa7d"}, - {file = "black-22.12.0-cp38-cp38-win_amd64.whl", hash = "sha256:82d9fe8fee3401e02e79767016b4907820a7dc28d70d137eb397b92ef3cc5bfc"}, - {file = "black-22.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:101c69b23df9b44247bd88e1d7e90154336ac4992502d4197bdac35dd7ee3320"}, - {file = "black-22.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:559c7a1ba9a006226f09e4916060982fd27334ae1998e7a38b3f33a37f7a2148"}, - {file = "black-22.12.0-py3-none-any.whl", hash = "sha256:436cc9167dd28040ad90d3b404aec22cedf24a6e4d7de221bec2730ec0c97bcf"}, - {file = "black-22.12.0.tar.gz", hash = "sha256:229351e5a18ca30f447bf724d007f890f97e13af070bb6ad4c0a441cd7596a2f"}, + {file = "black-23.3.0-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:0945e13506be58bf7db93ee5853243eb368ace1c08a24c65ce108986eac65915"}, + {file = "black-23.3.0-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:67de8d0c209eb5b330cce2469503de11bca4085880d62f1628bd9972cc3366b9"}, + {file = "black-23.3.0-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:7c3eb7cea23904399866c55826b31c1f55bbcd3890ce22ff70466b907b6775c2"}, + {file = "black-23.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32daa9783106c28815d05b724238e30718f34155653d4d6e125dc7daec8e260c"}, + {file = "black-23.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:35d1381d7a22cc5b2be2f72c7dfdae4072a3336060635718cc7e1ede24221d6c"}, + {file = "black-23.3.0-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:a8a968125d0a6a404842fa1bf0b349a568634f856aa08ffaff40ae0dfa52e7c6"}, + {file = "black-23.3.0-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:c7ab5790333c448903c4b721b59c0d80b11fe5e9803d8703e84dcb8da56fec1b"}, + {file = "black-23.3.0-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:a6f6886c9869d4daae2d1715ce34a19bbc4b95006d20ed785ca00fa03cba312d"}, + {file = "black-23.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f3c333ea1dd6771b2d3777482429864f8e258899f6ff05826c3a4fcc5ce3f70"}, + {file = "black-23.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:11c410f71b876f961d1de77b9699ad19f939094c3a677323f43d7a29855fe326"}, + {file = "black-23.3.0-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:1d06691f1eb8de91cd1b322f21e3bfc9efe0c7ca1f0e1eb1db44ea367dff656b"}, + {file = "black-23.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:50cb33cac881766a5cd9913e10ff75b1e8eb71babf4c7104f2e9c52da1fb7de2"}, + {file = "black-23.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:e114420bf26b90d4b9daa597351337762b63039752bdf72bf361364c1aa05925"}, + {file = "black-23.3.0-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:48f9d345675bb7fbc3dd85821b12487e1b9a75242028adad0333ce36ed2a6d27"}, + {file = "black-23.3.0-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:714290490c18fb0126baa0fca0a54ee795f7502b44177e1ce7624ba1c00f2331"}, + {file = "black-23.3.0-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:064101748afa12ad2291c2b91c960be28b817c0c7eaa35bec09cc63aa56493c5"}, + {file = "black-23.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:562bd3a70495facf56814293149e51aa1be9931567474993c7942ff7d3533961"}, + {file = "black-23.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:e198cf27888ad6f4ff331ca1c48ffc038848ea9f031a3b40ba36aced7e22f2c8"}, + {file = "black-23.3.0-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:3238f2aacf827d18d26db07524e44741233ae09a584273aa059066d644ca7b30"}, + {file = "black-23.3.0-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:f0bd2f4a58d6666500542b26354978218a9babcdc972722f4bf90779524515f3"}, + {file = "black-23.3.0-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:92c543f6854c28a3c7f39f4d9b7694f9a6eb9d3c5e2ece488c327b6e7ea9b266"}, + {file = "black-23.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a150542a204124ed00683f0db1f5cf1c2aaaa9cc3495b7a3b5976fb136090ab"}, + {file = "black-23.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:6b39abdfb402002b8a7d030ccc85cf5afff64ee90fa4c5aebc531e3ad0175ddb"}, + {file = "black-23.3.0-py3-none-any.whl", hash = "sha256:ec751418022185b0c1bb7d7736e6933d40bbb14c14a0abcf9123d1b159f98dd4"}, + {file = "black-23.3.0.tar.gz", hash = "sha256:1c7b8d606e728a41ea1ccbd7264677e494e87cf630e399262ced92d4a8dac940"}, ] [package.dependencies] click = ">=8.0.0" mypy-extensions = ">=0.4.3" +packaging = ">=22.0" pathspec = ">=0.9.0" platformdirs = ">=2" -tomli = {version = ">=1.1.0", markers = "python_full_version < \"3.11.0a7\""} +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\" and implementation_name == \"cpython\""} typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""} @@ -127,14 +141,14 @@ uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "bleach" -version = "5.0.1" +version = "6.0.0" description = "An easy safelist-based HTML-sanitizing tool." 
category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "bleach-5.0.1-py3-none-any.whl", hash = "sha256:085f7f33c15bd408dd9b17a4ad77c577db66d76203e5984b1bd59baeee948b2a"}, - {file = "bleach-5.0.1.tar.gz", hash = "sha256:0d03255c47eb9bd2f26aa9bb7f2107732e7e8fe195ca2f64709fcf3b0a4a085c"}, + {file = "bleach-6.0.0-py3-none-any.whl", hash = "sha256:33c16e3353dbd13028ab4799a0f89a83f113405c766e9c122df8a06f5b85b3f4"}, + {file = "bleach-6.0.0.tar.gz", hash = "sha256:1a1a85c1595e07d8db14c5f09f09e6433502c51c595970edc090551f0db99414"}, ] [package.dependencies] @@ -143,18 +157,17 @@ webencodings = "*" [package.extras] css = ["tinycss2 (>=1.1.0,<1.2)"] -dev = ["Sphinx (==4.3.2)", "black (==22.3.0)", "build (==0.8.0)", "flake8 (==4.0.1)", "hashin (==0.17.0)", "mypy (==0.961)", "pip-tools (==6.6.2)", "pytest (==7.1.2)", "tox (==3.25.0)", "twine (==4.0.1)", "wheel (==0.37.1)"] [[package]] name = "canonicaljson" -version = "1.6.4" +version = "1.6.5" description = "Canonical JSON" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "canonicaljson-1.6.4-py3-none-any.whl", hash = "sha256:55d282853b4245dbcd953fe54c39b91571813d7c44e1dbf66e3c4f97ff134a48"}, - {file = "canonicaljson-1.6.4.tar.gz", hash = "sha256:6c09b2119511f30eb1126cfcd973a10824e20f1cfd25039cde3d1218dd9c8d8f"}, + {file = "canonicaljson-1.6.5-py3-none-any.whl", hash = "sha256:806ea6f2cbb7405d20259e1c36dd1214ba5c242fa9165f5bd0bf2081f82c23fb"}, + {file = "canonicaljson-1.6.5.tar.gz", hash = "sha256:68dfc157b011e07d94bf74b5d4ccc01958584ed942d9dfd5fdd706609e81cd4b"}, ] [package.dependencies] @@ -255,19 +268,89 @@ pycparser = "*" [[package]] name = "charset-normalizer" -version = "2.0.12" +version = "3.1.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
category = "main" optional = false -python-versions = ">=3.5.0" +python-versions = ">=3.7.0" files = [ - {file = "charset-normalizer-2.0.12.tar.gz", hash = "sha256:2857e29ff0d34db842cd7ca3230549d1a697f96ee6d3fb071cfa6c7393832597"}, - {file = "charset_normalizer-2.0.12-py3-none-any.whl", hash = "sha256:6881edbebdb17b39b4eaaa821b438bf6eddffb4468cf344f09f89def34a8b1df"}, + {file = "charset-normalizer-3.1.0.tar.gz", hash = "sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-win32.whl", hash = "sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-win32.whl", hash = "sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = 
"sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-win32.whl", hash = "sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-win32.whl", hash = "sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-win32.whl", hash = "sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b"}, + {file = "charset_normalizer-3.1.0-py3-none-any.whl", hash = "sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d"}, ] -[package.extras] -unicode-backport = ["unicodedata2"] - [[package]] name = "click" version = "8.1.3" @@ -300,14 +383,14 @@ click = "*" [[package]] name = "colorama" -version = "0.4.4" +version = "0.4.6" description = "Cross-platform colored terminal text." category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" files = [ - {file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"}, - {file = "colorama-0.4.4.tar.gz", hash = "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b"}, + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] [[package]] @@ -339,50 +422,45 @@ files = [ [[package]] name = "cryptography" -version = "38.0.4" +version = "40.0.1" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
category = "main" optional = false python-versions = ">=3.6" files = [ - {file = "cryptography-38.0.4-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:2fa36a7b2cc0998a3a4d5af26ccb6273f3df133d61da2ba13b3286261e7efb70"}, - {file = "cryptography-38.0.4-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:1f13ddda26a04c06eb57119caf27a524ccae20533729f4b1e4a69b54e07035eb"}, - {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:2ec2a8714dd005949d4019195d72abed84198d877112abb5a27740e217e0ea8d"}, - {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50a1494ed0c3f5b4d07650a68cd6ca62efe8b596ce743a5c94403e6f11bf06c1"}, - {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a10498349d4c8eab7357a8f9aa3463791292845b79597ad1b98a543686fb1ec8"}, - {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:10652dd7282de17990b88679cb82f832752c4e8237f0c714be518044269415db"}, - {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:bfe6472507986613dc6cc00b3d492b2f7564b02b3b3682d25ca7f40fa3fd321b"}, - {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:ce127dd0a6a0811c251a6cddd014d292728484e530d80e872ad9806cfb1c5b3c"}, - {file = "cryptography-38.0.4-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:53049f3379ef05182864d13bb9686657659407148f901f3f1eee57a733fb4b00"}, - {file = "cryptography-38.0.4-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:8a4b2bdb68a447fadebfd7d24855758fe2d6fecc7fed0b78d190b1af39a8e3b0"}, - {file = "cryptography-38.0.4-cp36-abi3-win32.whl", hash = "sha256:1d7e632804a248103b60b16fb145e8df0bc60eed790ece0d12efe8cd3f3e7744"}, - {file = "cryptography-38.0.4-cp36-abi3-win_amd64.whl", hash = "sha256:8e45653fb97eb2f20b8c96f9cd2b3a0654d742b47d638cf2897afbd97f80fa6d"}, - {file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca57eb3ddaccd1112c18fc80abe41db443cc2e9dcb1917078e02dfa010a4f353"}, - {file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:c9e0d79ee4c56d841bd4ac6e7697c8ff3c8d6da67379057f29e66acffcd1e9a7"}, - {file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:0e70da4bdff7601b0ef48e6348339e490ebfb0cbe638e083c9c41fb49f00c8bd"}, - {file = "cryptography-38.0.4-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:998cd19189d8a747b226d24c0207fdaa1e6658a1d3f2494541cb9dfbf7dcb6d2"}, - {file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67461b5ebca2e4c2ab991733f8ab637a7265bb582f07c7c88914b5afb88cb95b"}, - {file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:4eb85075437f0b1fd8cd66c688469a0c4119e0ba855e3fef86691971b887caf6"}, - {file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3178d46f363d4549b9a76264f41c6948752183b3f587666aff0555ac50fd7876"}, - {file = "cryptography-38.0.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:6391e59ebe7c62d9902c24a4d8bcbc79a68e7c4ab65863536127c8a9cd94043b"}, - {file = "cryptography-38.0.4-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:78e47e28ddc4ace41dd38c42e6feecfdadf9c3be2af389abbfeef1ff06822285"}, - {file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:2fb481682873035600b5502f0015b664abc26466153fab5c6bc92c1ea69d478b"}, - {file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:4367da5705922cf7070462e964f66e4ac24162e22ab0a2e9d31f1b270dd78083"}, - {file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b4cad0cea995af760f82820ab4ca54e5471fc782f70a007f31531957f43e9dee"}, - {file = "cryptography-38.0.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:80ca53981ceeb3241998443c4964a387771588c4e4a5d92735a493af868294f9"}, - {file = "cryptography-38.0.4.tar.gz", hash = "sha256:175c1a818b87c9ac80bb7377f5520b7f31b3ef2a0004e2420319beadedb67290"}, + {file = "cryptography-40.0.1-cp36-abi3-macosx_10_12_universal2.whl", hash = "sha256:918cb89086c7d98b1b86b9fdb70c712e5a9325ba6f7d7cfb509e784e0cfc6917"}, + {file = "cryptography-40.0.1-cp36-abi3-macosx_10_12_x86_64.whl", hash = "sha256:9618a87212cb5200500e304e43691111570e1f10ec3f35569fdfcd17e28fd797"}, + {file = "cryptography-40.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a4805a4ca729d65570a1b7cac84eac1e431085d40387b7d3bbaa47e39890b88"}, + {file = "cryptography-40.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63dac2d25c47f12a7b8aa60e528bfb3c51c5a6c5a9f7c86987909c6c79765554"}, + {file = "cryptography-40.0.1-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:0a4e3406cfed6b1f6d6e87ed243363652b2586b2d917b0609ca4f97072994405"}, + {file = "cryptography-40.0.1-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:1e0af458515d5e4028aad75f3bb3fe7a31e46ad920648cd59b64d3da842e4356"}, + {file = "cryptography-40.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:d8aa3609d337ad85e4eb9bb0f8bcf6e4409bfb86e706efa9a027912169e89122"}, + {file = "cryptography-40.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:cf91e428c51ef692b82ce786583e214f58392399cf65c341bc7301d096fa3ba2"}, + {file = "cryptography-40.0.1-cp36-abi3-win32.whl", hash = "sha256:650883cc064297ef3676b1db1b7b1df6081794c4ada96fa457253c4cc40f97db"}, + {file = "cryptography-40.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:a805a7bce4a77d51696410005b3e85ae2839bad9aa38894afc0aa99d8e0c3160"}, + {file = "cryptography-40.0.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cd033d74067d8928ef00a6b1327c8ea0452523967ca4463666eeba65ca350d4c"}, + {file = "cryptography-40.0.1-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d36bbeb99704aabefdca5aee4eba04455d7a27ceabd16f3b3ba9bdcc31da86c4"}, + {file = "cryptography-40.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:32057d3d0ab7d4453778367ca43e99ddb711770477c4f072a51b3ca69602780a"}, + {file = "cryptography-40.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:f5d7b79fa56bc29580faafc2ff736ce05ba31feaa9d4735048b0de7d9ceb2b94"}, + {file = "cryptography-40.0.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7c872413353c70e0263a9368c4993710070e70ab3e5318d85510cc91cce77e7c"}, + {file = "cryptography-40.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:28d63d75bf7ae4045b10de5413fb1d6338616e79015999ad9cf6fc538f772d41"}, + {file = "cryptography-40.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6f2bbd72f717ce33100e6467572abaedc61f1acb87b8d546001328d7f466b778"}, + {file = "cryptography-40.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cc3a621076d824d75ab1e1e530e66e7e8564e357dd723f2533225d40fe35c60c"}, + {file = "cryptography-40.0.1.tar.gz", hash = 
"sha256:2803f2f8b1e95f614419926c7e6f55d828afc614ca5ed61543877ae668cc3472"}, ] [package.dependencies] cffi = ">=1.12" [package.extras] -docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1)", "sphinx-rtd-theme"] +docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] -pep8test = ["black", "flake8", "flake8-import-order", "pep8-naming"] +pep8test = ["black", "check-manifest", "mypy", "ruff"] sdist = ["setuptools-rust (>=0.11.4)"] ssh = ["bcrypt (>=3.1.5)"] -test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-subtests", "pytest-xdist", "pytz"] +test = ["iso8601", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-shard (>=0.1.2)", "pytest-subtests", "pytest-xdist"] +test-randomorder = ["pytest-randomly"] +tox = ["tox"] [[package]] name = "defusedxml" @@ -416,30 +494,30 @@ dev = ["PyTest", "PyTest (<5)", "PyTest-Cov", "PyTest-Cov (<2.6)", "bump2version [[package]] name = "docutils" -version = "0.18.1" +version = "0.19" description = "Docutils -- Python Documentation Utilities" category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=3.7" files = [ - {file = "docutils-0.18.1-py2.py3-none-any.whl", hash = "sha256:23010f129180089fbcd3bc08cfefccb3b890b0050e1ca00c867036e9d161b98c"}, - {file = "docutils-0.18.1.tar.gz", hash = "sha256:679987caf361a7539d76e584cbeddc311e3aee937877c87346f31debc63e9d06"}, + {file = "docutils-0.19-py3-none-any.whl", hash = "sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc"}, + {file = "docutils-0.19.tar.gz", hash = "sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6"}, ] [[package]] name = "elementpath" -version = "2.5.0" -description = "XPath 1.0/2.0 parsers and selectors for ElementTree and lxml" +version = "4.1.1" +description = "XPath 1.0/2.0/3.0/3.1 parsers and selectors for ElementTree and lxml" category = "main" optional = true python-versions = ">=3.7" files = [ - {file = "elementpath-2.5.0-py3-none-any.whl", hash = "sha256:2a432775e37a19e4362443078130a7dbfc457d7d093cd421c03958d9034cc08b"}, - {file = "elementpath-2.5.0.tar.gz", hash = "sha256:3a27aaf3399929fccda013899cb76d3ff111734abf4281e5f9d3721ba0b9ffa3"}, + {file = "elementpath-4.1.1-py3-none-any.whl", hash = "sha256:3ac9735460824ed5f14c223d8de36bede2a9347e11b2775dcbf801eede1c7cdc"}, + {file = "elementpath-4.1.1.tar.gz", hash = "sha256:cd2bff3de8cddf8a480f728e648c6cae47d8ab66696c7d830a7a84536d8cae58"}, ] [package.extras] -dev = ["Sphinx", "coverage", "flake8", "lxml", "memory-profiler", "mypy (==0.910)", "tox", "xmlschema (>=1.8.0)"] +dev = ["Sphinx", "coverage", "flake8", "lxml", "lxml-stubs", "memory-profiler", "memray", "mypy", "tox", "xmlschema (>=2.0.0)"] [[package]] name = "frozendict" @@ -470,14 +548,14 @@ files = [ [[package]] name = "gitdb" -version = "4.0.9" +version = "4.0.10" description = "Git Object Database" category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "gitdb-4.0.9-py3-none-any.whl", hash = "sha256:8033ad4e853066ba6ca92050b9df2f89301b8fc8bf7e9324d412a63f8bf1a8fd"}, - {file = "gitdb-4.0.9.tar.gz", hash = "sha256:bac2fd45c0a1c9cf619e63a90d62bdc63892ef92387424b855792a6cabe789aa"}, + {file = "gitdb-4.0.10-py3-none-any.whl", hash = "sha256:c286cf298426064079ed96a9e4a9d39e7f3e9bf15ba60701e95f5492f28415c7"}, + {file = "gitdb-4.0.10.tar.gz", 
hash = "sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a"}, ] [package.dependencies] @@ -485,14 +563,14 @@ smmap = ">=3.0.1,<6" [[package]] name = "gitpython" -version = "3.1.30" -description = "GitPython is a python library used to interact with Git repositories" +version = "3.1.31" +description = "GitPython is a Python library used to interact with Git repositories" category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "GitPython-3.1.30-py3-none-any.whl", hash = "sha256:cd455b0000615c60e286208ba540271af9fe531fa6a87cc590a7298785ab2882"}, - {file = "GitPython-3.1.30.tar.gz", hash = "sha256:769c2d83e13f5d938b7688479da374c4e3d49f71549aaf462b646db9602ea6f8"}, + {file = "GitPython-3.1.31-py3-none-any.whl", hash = "sha256:f04893614f6aa713a60cbbe1e6a97403ef633103cdd0ef5eb6efe0deb98dbe8d"}, + {file = "GitPython-3.1.31.tar.gz", hash = "sha256:8ce3bcf69adfdf7c7d503e78fd3b1c492af782d58893b650adb2ac8912ddd573"}, ] [package.dependencies] @@ -501,53 +579,101 @@ typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\"" [[package]] name = "hiredis" -version = "2.0.0" +version = "2.2.2" description = "Python wrapper for hiredis" category = "main" optional = true -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "hiredis-2.0.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b4c8b0bc5841e578d5fb32a16e0c305359b987b850a06964bd5a62739d688048"}, - {file = "hiredis-2.0.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:0adea425b764a08270820531ec2218d0508f8ae15a448568109ffcae050fee26"}, - {file = "hiredis-2.0.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:3d55e36715ff06cdc0ab62f9591607c4324297b6b6ce5b58cb9928b3defe30ea"}, - {file = "hiredis-2.0.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:5d2a48c80cf5a338d58aae3c16872f4d452345e18350143b3bf7216d33ba7b99"}, - {file = "hiredis-2.0.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:240ce6dc19835971f38caf94b5738092cb1e641f8150a9ef9251b7825506cb05"}, - {file = "hiredis-2.0.0-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:5dc7a94bb11096bc4bffd41a3c4f2b958257085c01522aa81140c68b8bf1630a"}, - {file = "hiredis-2.0.0-cp36-cp36m-win32.whl", hash = "sha256:139705ce59d94eef2ceae9fd2ad58710b02aee91e7fa0ccb485665ca0ecbec63"}, - {file = "hiredis-2.0.0-cp36-cp36m-win_amd64.whl", hash = "sha256:c39c46d9e44447181cd502a35aad2bb178dbf1b1f86cf4db639d7b9614f837c6"}, - {file = "hiredis-2.0.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:adf4dd19d8875ac147bf926c727215a0faf21490b22c053db464e0bf0deb0485"}, - {file = "hiredis-2.0.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:0f41827028901814c709e744060843c77e78a3aca1e0d6875d2562372fcb405a"}, - {file = "hiredis-2.0.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:508999bec4422e646b05c95c598b64bdbef1edf0d2b715450a078ba21b385bcc"}, - {file = "hiredis-2.0.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:0d5109337e1db373a892fdcf78eb145ffb6bbd66bb51989ec36117b9f7f9b579"}, - {file = "hiredis-2.0.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:04026461eae67fdefa1949b7332e488224eac9e8f2b5c58c98b54d29af22093e"}, - {file = "hiredis-2.0.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:a00514362df15af041cc06e97aebabf2895e0a7c42c83c21894be12b84402d79"}, - {file = "hiredis-2.0.0-cp37-cp37m-win32.whl", hash = "sha256:09004096e953d7ebd508cded79f6b21e05dff5d7361771f59269425108e703bc"}, - {file = "hiredis-2.0.0-cp37-cp37m-win_amd64.whl", hash = 
"sha256:f8196f739092a78e4f6b1b2172679ed3343c39c61a3e9d722ce6fcf1dac2824a"}, - {file = "hiredis-2.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:294a6697dfa41a8cba4c365dd3715abc54d29a86a40ec6405d677ca853307cfb"}, - {file = "hiredis-2.0.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:3dddf681284fe16d047d3ad37415b2e9ccdc6c8986c8062dbe51ab9a358b50a5"}, - {file = "hiredis-2.0.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:dcef843f8de4e2ff5e35e96ec2a4abbdf403bd0f732ead127bd27e51f38ac298"}, - {file = "hiredis-2.0.0-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:87c7c10d186f1743a8fd6a971ab6525d60abd5d5d200f31e073cd5e94d7e7a9d"}, - {file = "hiredis-2.0.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:7f0055f1809b911ab347a25d786deff5e10e9cf083c3c3fd2dd04e8612e8d9db"}, - {file = "hiredis-2.0.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:11d119507bb54e81f375e638225a2c057dda748f2b1deef05c2b1a5d42686048"}, - {file = "hiredis-2.0.0-cp38-cp38-win32.whl", hash = "sha256:7492af15f71f75ee93d2a618ca53fea8be85e7b625e323315169977fae752426"}, - {file = "hiredis-2.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:65d653df249a2f95673976e4e9dd7ce10de61cfc6e64fa7eeaa6891a9559c581"}, - {file = "hiredis-2.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ae8427a5e9062ba66fc2c62fb19a72276cf12c780e8db2b0956ea909c48acff5"}, - {file = "hiredis-2.0.0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:3f5f7e3a4ab824e3de1e1700f05ad76ee465f5f11f5db61c4b297ec29e692b2e"}, - {file = "hiredis-2.0.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:e3447d9e074abf0e3cd85aef8131e01ab93f9f0e86654db7ac8a3f73c63706ce"}, - {file = "hiredis-2.0.0-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:8b42c0dc927b8d7c0eb59f97e6e34408e53bc489f9f90e66e568f329bff3e443"}, - {file = "hiredis-2.0.0-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:b84f29971f0ad4adaee391c6364e6f780d5aae7e9226d41964b26b49376071d0"}, - {file = "hiredis-2.0.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:0b39ec237459922c6544d071cdcf92cbb5bc6685a30e7c6d985d8a3e3a75326e"}, - {file = "hiredis-2.0.0-cp39-cp39-win32.whl", hash = "sha256:a7928283143a401e72a4fad43ecc85b35c27ae699cf5d54d39e1e72d97460e1d"}, - {file = "hiredis-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:a4ee8000454ad4486fb9f28b0cab7fa1cd796fc36d639882d0b34109b5b3aec9"}, - {file = "hiredis-2.0.0-pp36-pypy36_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1f03d4dadd595f7a69a75709bc81902673fa31964c75f93af74feac2f134cc54"}, - {file = "hiredis-2.0.0-pp36-pypy36_pp73-manylinux1_x86_64.whl", hash = "sha256:04927a4c651a0e9ec11c68e4427d917e44ff101f761cd3b5bc76f86aaa431d27"}, - {file = "hiredis-2.0.0-pp36-pypy36_pp73-manylinux2010_x86_64.whl", hash = "sha256:a39efc3ade8c1fb27c097fd112baf09d7fd70b8cb10ef1de4da6efbe066d381d"}, - {file = "hiredis-2.0.0-pp36-pypy36_pp73-win32.whl", hash = "sha256:07bbf9bdcb82239f319b1f09e8ef4bdfaec50ed7d7ea51a56438f39193271163"}, - {file = "hiredis-2.0.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:807b3096205c7cec861c8803a6738e33ed86c9aae76cac0e19454245a6bbbc0a"}, - {file = "hiredis-2.0.0-pp37-pypy37_pp73-manylinux1_x86_64.whl", hash = "sha256:1233e303645f468e399ec906b6b48ab7cd8391aae2d08daadbb5cad6ace4bd87"}, - {file = "hiredis-2.0.0-pp37-pypy37_pp73-manylinux2010_x86_64.whl", hash = "sha256:cb2126603091902767d96bcb74093bd8b14982f41809f85c9b96e519c7e1dc41"}, - {file = "hiredis-2.0.0-pp37-pypy37_pp73-win32.whl", hash = "sha256:f52010e0a44e3d8530437e7da38d11fb822acfb0d5b12e9cd5ba655509937ca0"}, - {file = "hiredis-2.0.0.tar.gz", hash = 
"sha256:81d6d8e39695f2c37954d1011c0480ef7cf444d4e3ae24bc5e89ee5de360139a"}, + {file = "hiredis-2.2.2-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:ba6123ff137275e2f4c31fc74b93813fcbb79160d43f5357163e09638c7743de"}, + {file = "hiredis-2.2.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:d995846acc8e3339fb7833cd19bf6f3946ff5157c8488a4df9c51cd119a36870"}, + {file = "hiredis-2.2.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:82f869ca44bcafa37cd71cfa1429648fa354d6021dcd72f03a2f66bcb339c546"}, + {file = "hiredis-2.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa90a5ee7a7f30c3d72d3513914b8f51f953a71b8cbd52a241b6db6685e55645"}, + {file = "hiredis-2.2.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01e2e588392b5fdcc3a6aa0eb62a2eb2a142f829082fa4c3354228029d3aa1ce"}, + {file = "hiredis-2.2.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5dac177a6ab8b4eb4d5e74978c29eef7cc9eef14086f814cb3893f7465578044"}, + {file = "hiredis-2.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4cb992e3f9753c5a0c637f333c2010d1ad702aebf2d730ee4d484f32b19bae97"}, + {file = "hiredis-2.2.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e61c22fda5fc25d31bbced24a8322d33c5cb8cad9ba698634c16edb5b3e79a91"}, + {file = "hiredis-2.2.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9873898e26e50cd41415e9d1ea128bfdb60eb26abb4f5be28a4500fd7834dc0c"}, + {file = "hiredis-2.2.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2c18b00a382546e19bcda8b83dcca5b6e0dbc238d235723434405f48a18e8f77"}, + {file = "hiredis-2.2.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:8c3a6998f6f88d7ca4d082fd26525074df13162b274d7c64034784b6fdc56666"}, + {file = "hiredis-2.2.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:0fc1f9a9791d028b2b8afa318ccff734c7fc8861d37a04ca9b3d27c9b05f9718"}, + {file = "hiredis-2.2.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5f2cfd323f83985f2bed6ed013107873275025af270485b7d04c338bfb47bd14"}, + {file = "hiredis-2.2.2-cp310-cp310-win32.whl", hash = "sha256:55c7e9a9e05f8c0555bfba5c16d98492f8b6db650e56d0c35cc28aeabfc86020"}, + {file = "hiredis-2.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:eaff526c2fed31c971b0fa338a25237ae5513550ef75d0b85b9420ec778cca45"}, + {file = "hiredis-2.2.2-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:688b9b7458b4f3f452fea6ed062c04fa1fd9a69d9223d95c6cb052581aba553b"}, + {file = "hiredis-2.2.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:544d52fde3a8dac7854673eac20deca05214758193c01926ffbb0d57c6bf4ffe"}, + {file = "hiredis-2.2.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:990916e8b0b4eedddef787e73549b562f8c9e73a7fea82f9b8ff517806774ad0"}, + {file = "hiredis-2.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10dc34854e9acfb3e7cc4157606e2efcb497b1c6fca07bd6c3be34ae5e413f13"}, + {file = "hiredis-2.2.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c446a2007985ae49c2ecd946dd819dea72b931beb5f647ba08655a1a1e133fa8"}, + {file = "hiredis-2.2.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:02b9f928dc6cd43ed0f0ffc1c75fb209fb180f004b7e2e19994805f998d247aa"}, + {file = "hiredis-2.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a355aff8dfa02ebfe67f0946dd706e490bddda9ea260afac9cdc43942310c53"}, + {file = 
"hiredis-2.2.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:831461abe5b63e73719621a5f31d8fc175528a05dc09d5a8aa8ef565d6deefa4"}, + {file = "hiredis-2.2.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75349f7c8f77eb0fd33ede4575d1e5b0a902a8176a436bf03293d7fec4bd3894"}, + {file = "hiredis-2.2.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1eb39b34d15220095dc49ad1e1082580d35cd3b6d9741def52988b5075e4ff03"}, + {file = "hiredis-2.2.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:a9b306f4e870747eea8b008dcba2e9f1e4acd12b333a684bc1cc120e633a280e"}, + {file = "hiredis-2.2.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:03dfb4ab7a2136ce1be305592553f102e1bd91a96068ab2778e3252aed20d9bc"}, + {file = "hiredis-2.2.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d8bc89c7e33fecb083a199ade0131a34d20365a8c32239e218da57290987ca9a"}, + {file = "hiredis-2.2.2-cp311-cp311-win32.whl", hash = "sha256:ed44b3c711cecde920f238ac35f70ac08744f2079b6369655856e43944464a72"}, + {file = "hiredis-2.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:2e2f0ce3e8ab1314a52f562386220f6714fd24d7968a95528135ad04e88cc741"}, + {file = "hiredis-2.2.2-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:e7e61ab75b851aac2d6bc634d03738a242a6ef255a44178437b427c5ebac0a87"}, + {file = "hiredis-2.2.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9eb14339e399554bb436cc4628e8aaa3943adf7afcf34aba4cbd1e3e6b9ec7ec"}, + {file = "hiredis-2.2.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4ec57886f20f4298537cb1ab9dbda98594fb8d7c724c5fbf9a4b55329fd4a63"}, + {file = "hiredis-2.2.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a89f5afb9827eab07b9c8c585cd4dc95e5232c727508ae2c935d09531abe9e33"}, + {file = "hiredis-2.2.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3645590b9234cafd21c8ecfbf252ad9aa1d67629f4bdc98ba3627f48f8f7b5aa"}, + {file = "hiredis-2.2.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99350e89f52186146938bdba0b9c6cd68802c20346707d6ca8366f2d69d89b2f"}, + {file = "hiredis-2.2.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b5d290f3d8f7a05c4adbe6c355055b87c7081bfa1eccd1ae5491216307ee5f53"}, + {file = "hiredis-2.2.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c95be6f20377d5995ef41a98314542e194d2dc9c2579d8f130a1aea78d48fd42"}, + {file = "hiredis-2.2.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:e4e2da61a04251121cb551f569c3250e6e27e95f2a80f8351c36822eda1f5d2b"}, + {file = "hiredis-2.2.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:ac7f8d68826f95a3652e44b0c12bfa74d3aa6531d47d5dbe6a2fbfc7979bc20f"}, + {file = "hiredis-2.2.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:359e662324318baadb768d3c4ade8c4bdcfbb313570eb01e15d75dc5db781815"}, + {file = "hiredis-2.2.2-cp37-cp37m-win32.whl", hash = "sha256:fd0ca35e2cf44866137cbb5ae7e439fab18a0b0e0e1cf51d45137622d59ec012"}, + {file = "hiredis-2.2.2-cp37-cp37m-win_amd64.whl", hash = "sha256:c9488ffb10acc6b121c498875278b0a6715d193742dc92d21a281712169ac06d"}, + {file = "hiredis-2.2.2-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:1570fe4f93bc1ea487fb566f2b863fd0ed146f643a4ea31e4e07036db9e0c7f8"}, + {file = "hiredis-2.2.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:8753c561b37cccbda7264c9b4486e206a6318c18377cd647beb3aa41a15a6beb"}, + {file = 
"hiredis-2.2.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a06d0dd84f10be6b15a92edbca2490b64917280f66d8267c63de99b6550308ad"}, + {file = "hiredis-2.2.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40ff3f1ec3a4046732e9e41df08dcb1a559847196755d295d43e32528aae39e6"}, + {file = "hiredis-2.2.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c24d856e13c02bd9d28a189e47be70cbba6f2c2a4bd85a8cc98819db9e7e3e06"}, + {file = "hiredis-2.2.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ee9fe7cef505e8d925c70bebcc16bfab12aa7af922f948346baffd4730f7b00"}, + {file = "hiredis-2.2.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03ab1d545794bb0e09f3b1e2c8b3adcfacd84f6f2d402bfdcd441a98c0e9643c"}, + {file = "hiredis-2.2.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:14dfccf4696d75395c587a5dafafb4f7aa0a5d55309341d10bc2e7f1eaa20771"}, + {file = "hiredis-2.2.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2ddc573809ca4374da1b24b48604f34f3d5f0911fcccfb1c403ff8d8ca31c232"}, + {file = "hiredis-2.2.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:24301ca2bf9b2f843b4c3015c90f161798fa3bbc5b95fd494785751b137dbbe2"}, + {file = "hiredis-2.2.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:b083a69e158138ffa95740ff6984d328259387b5596908021b3ccb946469ff66"}, + {file = "hiredis-2.2.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:8e16dc949cc2e9c5fbcd08de05b5fb61b89ff65738d772863c5c96248628830e"}, + {file = "hiredis-2.2.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:674f296c3c89cb53f97aa9ba2508d3f360ad481b9e0c0e3a59b342a15192adaf"}, + {file = "hiredis-2.2.2-cp38-cp38-win32.whl", hash = "sha256:20ecbf87aac4f0f33f9c55ae15cb73b485d256c57518c590b7d0c9c152150632"}, + {file = "hiredis-2.2.2-cp38-cp38-win_amd64.whl", hash = "sha256:b11960237a3025bf248135e5b497dc4923e83d137eb798fbfe78b40d57c4b156"}, + {file = "hiredis-2.2.2-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:18103090b8eda9c529830e26594e88b0b1472055785f3ed29b8adc694d03862a"}, + {file = "hiredis-2.2.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:d1acb7c957e5343303b3862947df3232dc7395da320b3b9ae076dfaa56ad59dc"}, + {file = "hiredis-2.2.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4997f55e1208af95a8fbd0fa187b04c672fcec8f66e49b9ab7fcc45cc1657dc4"}, + {file = "hiredis-2.2.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:449e18506d22af40977abd0f5a8979f57f88d4562fe591478a3438d76a15133d"}, + {file = "hiredis-2.2.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a32a4474f7a4abdea954f3365608edee3f90f1de9fa05b81d214d4cad04c718a"}, + {file = "hiredis-2.2.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e86c800c6941698777fc58419216a66a7f76504f1cea72381d2ee206888e964d"}, + {file = "hiredis-2.2.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c73aa295c5369135247ff63aa1fbb116067485d0506cd787cc0c868e72bbee55"}, + {file = "hiredis-2.2.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e10a66680023bd5c5a3d605dae0844e3dde60eac5b79e39f51395a2aceaf634"}, + {file = "hiredis-2.2.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:03ab760fc96e0c5d36226eb727f30645bf6a53c97f14bfc0a4d0401bfc9b8af7"}, + {file = "hiredis-2.2.2-cp39-cp39-musllinux_1_1_i686.whl", hash = 
"sha256:855d258e7f1aee3d7fbd5b1dc87790b1b5016e23d369a97b934a25ae7bc0171f"}, + {file = "hiredis-2.2.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:ccc33d87866d213f84f857a98f69c13f94fbf99a3304e328869890c9e49c8d65"}, + {file = "hiredis-2.2.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:339af17bb9817f8acb127247c79a99cad63db6738c0fb2aec9fa3d4f35d2a250"}, + {file = "hiredis-2.2.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:57f73aa04d0b70ff436fb35fa7ea2b796aa7addbd7ebb8d1aa1f3d1b3e4439f1"}, + {file = "hiredis-2.2.2-cp39-cp39-win32.whl", hash = "sha256:e97d4e650b8d933a1229f341db92b610fc52b8d752490235977b63b81fbbc2cb"}, + {file = "hiredis-2.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:8d43a7bba66a800279e33229a206861be09c279e261eaa8db4824e59465f4848"}, + {file = "hiredis-2.2.2-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:632d79fd02b03e8d9fbaebbe40bfe34b920c5d0a9c0ef6270752e0db85208175"}, + {file = "hiredis-2.2.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a5fefac31c84143782ec1ebc323c04e733a6e4bfebcef9907a34e47a465e648"}, + {file = "hiredis-2.2.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5155bc1710df8e21aa48c9b2f4d4e13e4987e1efff363a1ef9c84fae2cc6c145"}, + {file = "hiredis-2.2.2-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f220b71235d2deab1b4b22681c8aee444720d973b80f1b86a4e2a85f6bcf1e1"}, + {file = "hiredis-2.2.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:f1f1efbe9cc29a3af39cf7eed27225f951aed3f48a1149c7fb74529fb5ab86d4"}, + {file = "hiredis-2.2.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1f1c44242c18b1f02e6d1162f133d65d00e09cc10d9165dccc78662def72abc2"}, + {file = "hiredis-2.2.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9e0f444d9062f7e487ef42bab2fb2e290f1704afcbca48ad3ec23de63eef0fda"}, + {file = "hiredis-2.2.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac15e7e1efca51b4695e540c80c328accb352c9608da7c2df82d1fa1a3c539ef"}, + {file = "hiredis-2.2.2-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:20cfbc469400669a5999aa34ccba3872a1e34490ec3d5c84e8c0752c27977b7c"}, + {file = "hiredis-2.2.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:bae004a0b978bf62e38d0eef5ab9156f8101d01167b3ca7054bd0994b773e917"}, + {file = "hiredis-2.2.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a1ce725542133dbdda9e8704867ef52651886bd1ef568c6fd997a27404381985"}, + {file = "hiredis-2.2.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e6ea7532221c97fa6d79f7d19d452cd9d1141d759c54279cc4774ce24728f13"}, + {file = "hiredis-2.2.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a7114961ed78d708142f6c6eb1d2ed65dc3da4b5ae8a4660ad889dd7fc891971"}, + {file = "hiredis-2.2.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1b084fbc3e69f99865242f8e1ccd4ea2a34bf6a3983d015d61133377526c0ce2"}, + {file = "hiredis-2.2.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2d1ba0799f3487294f72b2157944d5c3a4fb33c99e2d495d63eab98c7ec7234b"}, + {file = "hiredis-2.2.2.tar.gz", hash = "sha256:9c270bd0567a9c60673284e000132f603bb4ecbcd707567647a68f85ef45c4d4"}, ] [[package]] @@ -579,86 +705,102 @@ files = [ [[package]] name = "ijson" -version = "3.1.4" +version = "3.2.0.post0" 
description = "Iterative JSON parser with standard Python iterator interfaces" category = "main" optional = false python-versions = "*" files = [ - {file = "ijson-3.1.4-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:6c1a777096be5f75ffebb335c6d2ebc0e489b231496b7f2ca903aa061fe7d381"}, - {file = "ijson-3.1.4-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:475fc25c3d2a86230b85777cae9580398b42eed422506bf0b6aacfa936f7bfcd"}, - {file = "ijson-3.1.4-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:f587699b5a759e30accf733e37950cc06c4118b72e3e146edcea77dded467426"}, - {file = "ijson-3.1.4-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:339b2b4c7bbd64849dd69ef94ee21e29dcd92c831f47a281fdd48122bb2a715a"}, - {file = "ijson-3.1.4-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:446ef8980504da0af8d20d3cb6452c4dc3d8aa5fd788098985e899b913191fe6"}, - {file = "ijson-3.1.4-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:3997a2fdb28bc04b9ab0555db5f3b33ed28d91e9d42a3bf2c1842d4990beb158"}, - {file = "ijson-3.1.4-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:fa10a1d88473303ec97aae23169d77c5b92657b7fb189f9c584974c00a79f383"}, - {file = "ijson-3.1.4-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:9a5bf5b9d8f2ceaca131ee21fc7875d0f34b95762f4f32e4d65109ca46472147"}, - {file = "ijson-3.1.4-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:81cc8cee590c8a70cca3c9aefae06dd7cb8e9f75f3a7dc12b340c2e332d33a2a"}, - {file = "ijson-3.1.4-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4ea5fc50ba158f72943d5174fbc29ebefe72a2adac051c814c87438dc475cf78"}, - {file = "ijson-3.1.4-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:3b98861a4280cf09d267986cefa46c3bd80af887eae02aba07488d80eb798afa"}, - {file = "ijson-3.1.4-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:068c692efba9692406b86736dcc6803e4a0b6280d7f0b7534bff3faec677ff38"}, - {file = "ijson-3.1.4-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:86884ac06ac69cea6d89ab7b84683b3b4159c4013e4a20276d3fc630fe9b7588"}, - {file = "ijson-3.1.4-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:41e5886ff6fade26f10b87edad723d2db14dcbb1178717790993fcbbb8ccd333"}, - {file = "ijson-3.1.4-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:24b58933bf777d03dc1caa3006112ec7f9e6f6db6ffe1f5f5bd233cb1281f719"}, - {file = "ijson-3.1.4-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:13f80aad0b84d100fb6a88ced24bade21dc6ddeaf2bba3294b58728463194f50"}, - {file = "ijson-3.1.4-cp35-cp35m-win32.whl", hash = "sha256:fa9a25d0bd32f9515e18a3611690f1de12cb7d1320bd93e9da835936b41ad3ff"}, - {file = "ijson-3.1.4-cp35-cp35m-win_amd64.whl", hash = "sha256:c4c1bf98aaab4c8f60d238edf9bcd07c896cfcc51c2ca84d03da22aad88957c5"}, - {file = "ijson-3.1.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f0f2a87c423e8767368aa055310024fa28727f4454463714fef22230c9717f64"}, - {file = "ijson-3.1.4-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:15507de59d74d21501b2a076d9c49abf927eb58a51a01b8f28a0a0565db0a99f"}, - {file = "ijson-3.1.4-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:2e6bd6ad95ab40c858592b905e2bbb4fe79bbff415b69a4923dafe841ffadcb4"}, - {file = "ijson-3.1.4-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:68e295bb12610d086990cedc89fb8b59b7c85740d66e9515aed062649605d0bf"}, - {file = "ijson-3.1.4-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:3bb461352c0f0f2ec460a4b19400a665b8a5a3a2da663a32093df1699642ee3f"}, - {file = "ijson-3.1.4-cp36-cp36m-manylinux2014_aarch64.whl", hash = 
"sha256:f91c75edd6cf1a66f02425bafc59a22ec29bc0adcbc06f4bfd694d92f424ceb3"}, - {file = "ijson-3.1.4-cp36-cp36m-win32.whl", hash = "sha256:4c53cc72f79a4c32d5fc22efb85aa22f248e8f4f992707a84bdc896cc0b1ecf9"}, - {file = "ijson-3.1.4-cp36-cp36m-win_amd64.whl", hash = "sha256:ac9098470c1ff6e5c23ec0946818bc102bfeeeea474554c8d081dc934be20988"}, - {file = "ijson-3.1.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dcd6f04df44b1945b859318010234651317db2c4232f75e3933f8bb41c4fa055"}, - {file = "ijson-3.1.4-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:5a2f40c053c837591636dc1afb79d85e90b9a9d65f3d9963aae31d1eb11bfed2"}, - {file = "ijson-3.1.4-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:f50337e3b8e72ec68441b573c2848f108a8976a57465c859b227ebd2a2342901"}, - {file = "ijson-3.1.4-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:454918f908abbed3c50a0a05c14b20658ab711b155e4f890900e6f60746dd7cc"}, - {file = "ijson-3.1.4-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:387c2ec434cc1bc7dc9bd33ec0b70d95d443cc1e5934005f26addc2284a437ab"}, - {file = "ijson-3.1.4-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:179ed6fd42e121d252b43a18833df2de08378fac7bce380974ef6f5e522afefa"}, - {file = "ijson-3.1.4-cp37-cp37m-win32.whl", hash = "sha256:26a6a550b270df04e3f442e2bf0870c9362db4912f0e7bdfd300f30ea43115a2"}, - {file = "ijson-3.1.4-cp37-cp37m-win_amd64.whl", hash = "sha256:ff8cf7507d9d8939264068c2cff0a23f99703fa2f31eb3cb45a9a52798843586"}, - {file = "ijson-3.1.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:09c9d7913c88a6059cd054ff854958f34d757402b639cf212ffbec201a705a0d"}, - {file = "ijson-3.1.4-cp38-cp38-manylinux1_i686.whl", hash = "sha256:702ba9a732116d659a5e950ee176be6a2e075998ef1bcde11cbf79a77ed0f717"}, - {file = "ijson-3.1.4-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:667841591521158770adc90793c2bdbb47c94fe28888cb802104b8bbd61f3d51"}, - {file = "ijson-3.1.4-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:df641dd07b38c63eecd4f454db7b27aa5201193df160f06b48111ba97ab62504"}, - {file = "ijson-3.1.4-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:9348e7d507eb40b52b12eecff3d50934fcc3d2a15a2f54ec1127a36063b9ba8f"}, - {file = "ijson-3.1.4-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:93455902fdc33ba9485c7fae63ac95d96e0ab8942224a357113174bbeaff92e9"}, - {file = "ijson-3.1.4-cp38-cp38-win32.whl", hash = "sha256:5b725f2e984ce70d464b195f206fa44bebbd744da24139b61fec72de77c03a16"}, - {file = "ijson-3.1.4-cp38-cp38-win_amd64.whl", hash = "sha256:a5965c315fbb2dc9769dfdf046eb07daf48ae20b637da95ec8d62b629be09df4"}, - {file = "ijson-3.1.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b8ee7dbb07cec9ba29d60cfe4954b3cc70adb5f85bba1f72225364b59c1cf82b"}, - {file = "ijson-3.1.4-cp39-cp39-manylinux1_i686.whl", hash = "sha256:d9e01c55d501e9c3d686b6ee3af351c9c0c8c3e45c5576bd5601bee3e1300b09"}, - {file = "ijson-3.1.4-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:297f26f27a04cd0d0a2f865d154090c48ea11b239cabe0a17a6c65f0314bd1ca"}, - {file = "ijson-3.1.4-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:9239973100338a4138d09d7a4602bd289861e553d597cd67390c33bfc452253e"}, - {file = "ijson-3.1.4-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:2a64c66a08f56ed45a805691c2fd2e1caef00edd6ccf4c4e5eff02cd94ad8364"}, - {file = "ijson-3.1.4-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:d17fd199f0d0a4ab6e0d541b4eec1b68b5bd5bb5d8104521e22243015b51049b"}, - {file = "ijson-3.1.4-cp39-cp39-win32.whl", hash = "sha256:70ee3c8fa0eba18c80c5911639c01a8de4089a4361bad2862a9949e25ec9b1c8"}, - {file = 
"ijson-3.1.4-cp39-cp39-win_amd64.whl", hash = "sha256:6bf2b64304321705d03fa5e403ec3f36fa5bb27bf661849ad62e0a3a49bc23e3"}, - {file = "ijson-3.1.4-pp27-pypy_73-macosx_10_9_x86_64.whl", hash = "sha256:5d7e3fcc3b6de76a9dba1e9fc6ca23dad18f0fa6b4e6499415e16b684b2e9af1"}, - {file = "ijson-3.1.4-pp27-pypy_73-manylinux1_x86_64.whl", hash = "sha256:a72eb0359ebff94754f7a2f00a6efe4c57716f860fc040c606dedcb40f49f233"}, - {file = "ijson-3.1.4-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:28fc168f5faf5759fdfa2a63f85f1f7a148bbae98f34404a6ba19f3d08e89e87"}, - {file = "ijson-3.1.4-pp36-pypy36_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2844d4a38d27583897ed73f7946e205b16926b4cab2525d1ce17e8b08064c706"}, - {file = "ijson-3.1.4-pp36-pypy36_pp73-manylinux1_x86_64.whl", hash = "sha256:252defd1f139b5fb8c764d78d5e3a6df81543d9878c58992a89b261369ea97a7"}, - {file = "ijson-3.1.4-pp36-pypy36_pp73-manylinux2010_x86_64.whl", hash = "sha256:15d5356b4d090c699f382c8eb6a2bcd5992a8c8e8b88c88bc6e54f686018328a"}, - {file = "ijson-3.1.4-pp36-pypy36_pp73-win32.whl", hash = "sha256:6774ec0a39647eea70d35fb76accabe3d71002a8701c0545b9120230c182b75b"}, - {file = "ijson-3.1.4-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f11da15ec04cc83ff0f817a65a3392e169be8d111ba81f24d6e09236597bb28c"}, - {file = "ijson-3.1.4-pp37-pypy37_pp73-manylinux1_x86_64.whl", hash = "sha256:ee13ceeed9b6cf81b3b8197ef15595fc43fd54276842ed63840ddd49db0603da"}, - {file = "ijson-3.1.4-pp37-pypy37_pp73-manylinux2010_x86_64.whl", hash = "sha256:97e4df67235fae40d6195711223520d2c5bf1f7f5087c2963fcde44d72ebf448"}, - {file = "ijson-3.1.4-pp37-pypy37_pp73-win32.whl", hash = "sha256:3d10eee52428f43f7da28763bb79f3d90bbbeea1accb15de01e40a00885b6e89"}, - {file = "ijson-3.1.4.tar.gz", hash = "sha256:1d1003ae3c6115ec9b587d29dd136860a81a23c7626b682e2b5b12c9fd30e4ea"}, + {file = "ijson-3.2.0.post0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5809752045ef74c26adf159ed03df7fb7e7a8d656992fd7562663ed47d6d39d9"}, + {file = "ijson-3.2.0.post0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ce4be2beece2629bd24bcab147741d1532bd5ed40fb52f2b4fcde5c5bf606df0"}, + {file = "ijson-3.2.0.post0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5d365df54d18076f1d5f2ffb1eef2ac7f0d067789838f13d393b5586fbb77b02"}, + {file = "ijson-3.2.0.post0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c93ae4d49d8cf8accfedc8a8e7815851f56ceb6e399b0c186754a68fed22844"}, + {file = "ijson-3.2.0.post0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:47a56e3628c227081a2aa58569cbf2af378bad8af648aa904080e87cd6644cfb"}, + {file = "ijson-3.2.0.post0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8af68fe579f6f0b9a8b3f033d10caacfed6a4b89b8c7a1d9478a8f5d8aba4a1"}, + {file = "ijson-3.2.0.post0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6eed1ddd3147de49226db4f213851cf7860493a7b6c7bd5e62516941c007094c"}, + {file = "ijson-3.2.0.post0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:9ecbf85a6d73fc72f6534c38f7d92ed15d212e29e0dbe9810a465d61c8a66d23"}, + {file = "ijson-3.2.0.post0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd218b338ac68213c997d4c88437c0e726f16d301616bf837e1468901934042c"}, + {file = "ijson-3.2.0.post0-cp310-cp310-win32.whl", hash = "sha256:4e7c4fdc7d24747c8cc7d528c145afda4de23210bf4054bd98cd63bf07e4882d"}, + {file = "ijson-3.2.0.post0-cp310-cp310-win_amd64.whl", hash = "sha256:4d4e143908f47307042c9678803d27706e0e2099d0a6c1988c6cae1da07760bf"}, + {file = 
"ijson-3.2.0.post0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:56500dac8f52989ef7c0075257a8b471cbea8ef77f1044822742b3cbf2246e8b"}, + {file = "ijson-3.2.0.post0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:535665a77408b6bea56eb828806fae125846dff2e2e0ed4cb2e0a8e36244d753"}, + {file = "ijson-3.2.0.post0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a4465c90b25ca7903410fabe4145e7b45493295cc3b84ec1216653fbe9021276"}, + {file = "ijson-3.2.0.post0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efee1e9b4f691e1086730f3010e31c55625bc2e0f7db292a38a2cdf2774c2e13"}, + {file = "ijson-3.2.0.post0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6fd55f7a46429de95383fc0d0158c1bfb798e976d59d52830337343c2d9bda5c"}, + {file = "ijson-3.2.0.post0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25919b444426f58dcc62f763d1c6be6297f309da85ecab55f51da6ca86fc9fdf"}, + {file = "ijson-3.2.0.post0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c85892d68895ba7a0b16a0e6b7d9f9a0e30e86f2b1e0f6986243473ba8735432"}, + {file = "ijson-3.2.0.post0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:27409ba44cfd006901971063d37699f72e092b5efaa1586288b5067d80c6b5bd"}, + {file = "ijson-3.2.0.post0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:11dfd64633fe1382c4237477ac3836f682ca17e25e0d0799e84737795b0611df"}, + {file = "ijson-3.2.0.post0-cp311-cp311-win32.whl", hash = "sha256:41e955e173f77f54337fecaaa58a35c464b75e232b1f939b282497134a4d4f0e"}, + {file = "ijson-3.2.0.post0-cp311-cp311-win_amd64.whl", hash = "sha256:b3bdd2e12d9b9a18713dd6f3c5ef3734fdab25b79b177054ba9e35ecc746cb6e"}, + {file = "ijson-3.2.0.post0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:26b57838e712b8852c40ec6d74c6de8bb226446440e1af1354c077a6f81b9142"}, + {file = "ijson-3.2.0.post0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6464242f7895268d3086d7829ef031b05c77870dad1e13e51ef79d0a9cfe029"}, + {file = "ijson-3.2.0.post0-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3c6cf18b61b94db9590f86af0dd60edbccb36e151643152b8688066f677fbc9"}, + {file = "ijson-3.2.0.post0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:992e9e68003df32e2aa0f31eb82c0a94f21286203ab2f2b2c666410e17b59d2f"}, + {file = "ijson-3.2.0.post0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:d3e255ef05b434f20fc9d4b18ea15733d1038bec3e4960d772b06216fa79e82d"}, + {file = "ijson-3.2.0.post0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:424232c2bf3e8181f1b572db92c179c2376b57eba9fc8931453fba975f48cb80"}, + {file = "ijson-3.2.0.post0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bced6cd5b09d4d002dda9f37292dd58d26eb1c4d0d179b820d3708d776300bb4"}, + {file = "ijson-3.2.0.post0-cp36-cp36m-win32.whl", hash = "sha256:a8c84dff2d60ae06d5280ec87cd63050bbd74a90c02bfc7c390c803cfc8ac8fc"}, + {file = "ijson-3.2.0.post0-cp36-cp36m-win_amd64.whl", hash = "sha256:a340413a9bf307fafd99254a4dd4ac6c567b91a205bf896dde18888315fd7fcd"}, + {file = "ijson-3.2.0.post0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b3456cd5b16ec9db3ef23dd27f37bf5a14f765e8272e9af3e3de9ee9a4cba867"}, + {file = "ijson-3.2.0.post0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0eb838b4e4360e65c00aa13c78b35afc2477759d423b602b60335af5bed3de5b"}, + {file = "ijson-3.2.0.post0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:fe7f414edd69dd9199b0dfffa0ada22f23d8009e10fe2a719e0993b7dcc2e6e2"}, + {file = "ijson-3.2.0.post0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:183841b8d033ca95457f61fb0719185dc7f51a616070bdf1dcaf03473bed05b2"}, + {file = "ijson-3.2.0.post0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1302dc6490da7d44c3a76a5f0b87d8bec9f918454c6d6e6bf4ed922e47da58bb"}, + {file = "ijson-3.2.0.post0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:3b21b1ecd20ed2f918f6f99cdfa68284a416c0f015ffa64b68fa933df1b24d40"}, + {file = "ijson-3.2.0.post0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e97e6e07851cefe7baa41f1ebf5c0899d2d00d94bfef59825752e4c784bebbe8"}, + {file = "ijson-3.2.0.post0-cp37-cp37m-win32.whl", hash = "sha256:cd0450e76b9c629b7f86e7d5b91b7cc9c281dd719630160a992b19a856f7bdbd"}, + {file = "ijson-3.2.0.post0-cp37-cp37m-win_amd64.whl", hash = "sha256:bed8dcb7dbfdb98e647ad47676045e0891f610d38095dcfdae468e1e1efb2766"}, + {file = "ijson-3.2.0.post0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a7698bc480df76073067017f73ba4139dbaae20f7a6c9a0c7855b9c5e9a62124"}, + {file = "ijson-3.2.0.post0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2f204f6d4cedeb28326c230a0b046968b5263c234c65a5b18cee22865800fff7"}, + {file = "ijson-3.2.0.post0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9829a17f6f78d7f4d0aeff28c126926a1e5f86828ebb60d6a0acfa0d08457f9f"}, + {file = "ijson-3.2.0.post0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f470f3d750e00df86e03254fdcb422d2f726f4fb3a0d8eeee35e81343985e58a"}, + {file = "ijson-3.2.0.post0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb167ee21d9c413d6b0ab65ec12f3e7ea0122879da8b3569fa1063526f9f03a8"}, + {file = "ijson-3.2.0.post0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84eed88177f6c243c52b280cb094f751de600d98d2221e0dec331920894889ec"}, + {file = "ijson-3.2.0.post0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:53f1a13eb99ab514c562869513172135d4b55a914b344e6518ba09ad3ef1e503"}, + {file = "ijson-3.2.0.post0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:f6785ba0f65eb64b1ce3b7fcfec101085faf98f4e77b234f14287fd4138ffb25"}, + {file = "ijson-3.2.0.post0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:79b94662c2e9d366ab362c2c5858097eae0da100dea0dfd340db09ab28c8d5e8"}, + {file = "ijson-3.2.0.post0-cp38-cp38-win32.whl", hash = "sha256:5242cb2313ba3ece307b426efa56424ac13cc291c36f292b501d412a98ad0703"}, + {file = "ijson-3.2.0.post0-cp38-cp38-win_amd64.whl", hash = "sha256:775444a3b647350158d0b3c6c39c88b4a0995643a076cb104bf25042c9aedcf8"}, + {file = "ijson-3.2.0.post0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1d64ffaab1d006a4fa9584a4c723e95cc9609bf6c3365478e250cd0bffaaadf3"}, + {file = "ijson-3.2.0.post0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:434e57e7ec5c334ccb0e67bb4d9e60c264dcb2a3843713dbeb12cb19fe42a668"}, + {file = "ijson-3.2.0.post0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:158494bfe89ccb32618d0e53b471364080ceb975462ec464d9f9f37d9832b653"}, + {file = "ijson-3.2.0.post0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f20072376e338af0e51ccecb02335b4e242d55a9218a640f545be7fc64cca99"}, + {file = "ijson-3.2.0.post0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3e8d46c1004afcf2bf513a8fb575ee2ec3d8009a2668566b5926a2dcf7f1a45"}, + {file = "ijson-3.2.0.post0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:986a0347fe19e5117a5241276b72add570839e5bcdc7a6dac4b538c5928eeff5"}, + {file = "ijson-3.2.0.post0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:535a59d61b9aef6fc2a3d01564c1151e38e5a44b92cd6583cb4e8ccf0f58043f"}, + {file = "ijson-3.2.0.post0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:830de03f391f7e72b8587bb178c22d534da31153e9ee4234d54ef82cde5ace5e"}, + {file = "ijson-3.2.0.post0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6def9ac8d73b76cb02e9e9837763f27f71e5e67ec0afae5f1f4cf8f61c39b1ac"}, + {file = "ijson-3.2.0.post0-cp39-cp39-win32.whl", hash = "sha256:11bb84a53c37e227e733c6dffad2037391cf0b3474bff78596dc4373b02008a0"}, + {file = "ijson-3.2.0.post0-cp39-cp39-win_amd64.whl", hash = "sha256:f349bee14d0a4a72ba41e1b1cce52af324ebf704f5066c09e3dd04cfa6f545f0"}, + {file = "ijson-3.2.0.post0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5418066666b25b05f2b8ae2698408daa0afa68f07b0b217f2ab24465b7e9cbd9"}, + {file = "ijson-3.2.0.post0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ccc4d4b947549f9c431651c02b95ef571412c78f88ded198612a41d5c5701a0"}, + {file = "ijson-3.2.0.post0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dcec67fc15e5978ad286e8cc2a3f9347076e28e0e01673b5ace18c73da64e3ff"}, + {file = "ijson-3.2.0.post0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ee9537e8a8aa15dd2d0912737aeb6265e781e74f7f7cad8165048fcb5f39230"}, + {file = "ijson-3.2.0.post0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:03dfd4c8ed19e704d04b0ad4f34f598dc569fd3f73089f80eed698e7f6069233"}, + {file = "ijson-3.2.0.post0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2d50b2ad9c6c51ca160aa60de7f4dacd1357c38d0e503f51aed95c1c1945ff53"}, + {file = "ijson-3.2.0.post0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51c1db80d7791fb761ad9a6c70f521acd2c4b0e5afa2fe0d813beb2140d16c37"}, + {file = "ijson-3.2.0.post0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:13f2939db983327dd0492f6c1c0e77be3f2cbf9b620c92c7547d1d2cd6ef0486"}, + {file = "ijson-3.2.0.post0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2f9d449f86f8971c24609e319811f7f3b6b734f0218c4a0e799debe19300d15b"}, + {file = "ijson-3.2.0.post0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:7e0d1713a9074a7677eb8e43f424b731589d1c689d4676e2f57a5ce59d089e89"}, + {file = "ijson-3.2.0.post0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c8646eb81eec559d7d8b1e51a5087299d06ecab3bc7da54c01f7df94350df135"}, + {file = "ijson-3.2.0.post0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fe3a53e00c59de33b825ba8d6d39f544a7d7180983cd3d6bd2c3794ae35442"}, + {file = "ijson-3.2.0.post0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93aaec00cbde65c192f15c21f3ee44d2ab0c11eb1a35020b5c4c2676f7fe01d0"}, + {file = "ijson-3.2.0.post0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00594ed3ef2218fee8c652d9e7f862fb39f8251b67c6379ef12f7e044bf6bbf3"}, + {file = "ijson-3.2.0.post0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1a75cfb34217b41136b714985be645f12269e4345da35d7b48aabd317c82fd10"}, + {file = "ijson-3.2.0.post0.tar.gz", hash = "sha256:80a5bd7e9923cab200701f67ad2372104328b99ddf249dbbe8834102c852d316"}, ] [[package]] name = "importlib-metadata" -version = "6.0.0" +version = "6.3.0" description = "Read metadata from Python packages" category = 
"main" optional = false python-versions = ">=3.7" files = [ - {file = "importlib_metadata-6.0.0-py3-none-any.whl", hash = "sha256:7efb448ec9a5e313a57655d35aa54cd3e01b7e1fbcf72dce1bf06119420f5bad"}, - {file = "importlib_metadata-6.0.0.tar.gz", hash = "sha256:e354bedeb60efa6affdcc8ae121b73544a7aa74156d047311948f6d711cd378d"}, + {file = "importlib_metadata-6.3.0-py3-none-any.whl", hash = "sha256:8f8bd2af397cf33bd344d35cfe7f489219b7d14fc79a3f854b75b8417e9226b0"}, + {file = "importlib_metadata-6.3.0.tar.gz", hash = "sha256:23c2bcae4762dfb0bbe072d358faec24957901d75b6c4ab11172c0c982532402"}, ] [package.dependencies] @@ -672,53 +814,54 @@ testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packag [[package]] name = "importlib-resources" -version = "5.4.0" +version = "5.12.0" description = "Read resources from Python packages" category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "importlib_resources-5.4.0-py3-none-any.whl", hash = "sha256:33a95faed5fc19b4bc16b29a6eeae248a3fe69dd55d4d229d2b480e23eeaad45"}, - {file = "importlib_resources-5.4.0.tar.gz", hash = "sha256:d756e2f85dd4de2ba89be0b21dba2a3bbec2e871a42a3a16719258a11f87506b"}, + {file = "importlib_resources-5.12.0-py3-none-any.whl", hash = "sha256:7b1deeebbf351c7578e09bf2f63fa2ce8b5ffec296e0d349139d43cca061a81a"}, + {file = "importlib_resources-5.12.0.tar.gz", hash = "sha256:4be82589bf5c1d7999aedf2a45159d10cb3ca4f19b2271f8792bc8e6da7b22f6"}, ] [package.dependencies] zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} [package.extras] -docs = ["jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "sphinx"] -testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] [[package]] name = "incremental" -version = "21.3.0" -description = "A small library that versions your Python projects." +version = "22.10.0" +description = "\"A small library that versions your Python projects.\"" category = "main" optional = false python-versions = "*" files = [ - {file = "incremental-21.3.0-py2.py3-none-any.whl", hash = "sha256:92014aebc6a20b78a8084cdd5645eeaa7f74b8933f70fa3ada2cfbd1e3b54321"}, - {file = "incremental-21.3.0.tar.gz", hash = "sha256:02f5de5aff48f6b9f665d99d48bfc7ec03b6e3943210de7cfc88856d755d6f57"}, + {file = "incremental-22.10.0-py2.py3-none-any.whl", hash = "sha256:b864a1f30885ee72c5ac2835a761b8fe8aa9c28b9395cacf27286602688d3e51"}, + {file = "incremental-22.10.0.tar.gz", hash = "sha256:912feeb5e0f7e0188e6f42241d2f450002e11bbc0937c65865045854c24c0bd0"}, ] [package.extras] +mypy = ["click (>=6.0)", "mypy (==0.812)", "twisted (>=16.4.0)"] scripts = ["click (>=6.0)", "twisted (>=16.4.0)"] [[package]] name = "isort" -version = "5.11.4" +version = "5.11.5" description = "A Python utility / library to sort Python imports." 
category = "dev" optional = false python-versions = ">=3.7.0" files = [ - {file = "isort-5.11.4-py3-none-any.whl", hash = "sha256:c033fd0edb91000a7f09527fe5c75321878f98322a77ddcc81adbd83724afb7b"}, - {file = "isort-5.11.4.tar.gz", hash = "sha256:6db30c5ded9815d813932c04c2f85a360bcdd35fed496f4d8f35495ef0a261b6"}, + {file = "isort-5.11.5-py3-none-any.whl", hash = "sha256:ba1d72fb2595a01c7895a5128f9585a5cc4b6d395f1c8d514989b9a7eb2a8746"}, + {file = "isort-5.11.5.tar.gz", hash = "sha256:6be1f76a507cb2ecf16c7cf14a37e41609ca082330be4e3436a18ef74add55db"}, ] [package.extras] colors = ["colorama (>=0.4.3,<0.5.0)"] -pipfile-deprecated-finder = ["pipreqs", "requirementslib"] +pipfile-deprecated-finder = ["pip-shims (>=0.5.2)", "pipreqs", "requirementslib"] plugins = ["setuptools"] requirements-deprecated-finder = ["pip-api", "pipreqs"] @@ -742,20 +885,39 @@ tornado = ">=4.3" [package.extras] tests = ["codecov", "coverage", "flake8", "flake8-quotes", "flake8-typing-imports", "mock", "mypy", "opentracing_instrumentation (>=3,<4)", "prometheus_client (==0.11.0)", "pycurl", "pytest", "pytest-benchmark[histogram]", "pytest-cov", "pytest-localserver", "pytest-timeout", "pytest-tornado", "tchannel (==2.1.0)"] +[[package]] +name = "jaraco-classes" +version = "3.2.3" +description = "Utility functions for Python class constructs" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "jaraco.classes-3.2.3-py3-none-any.whl", hash = "sha256:2353de3288bc6b82120752201c6b1c1a14b058267fa424ed5ce5984e3b922158"}, + {file = "jaraco.classes-3.2.3.tar.gz", hash = "sha256:89559fa5c1d3c34eff6f631ad80bb21f378dbcbb35dd161fd2c6b93f5be2f98a"}, +] + +[package.dependencies] +more-itertools = "*" + +[package.extras] +docs = ["jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"] +testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] + [[package]] name = "jeepney" -version = "0.7.1" +version = "0.8.0" description = "Low-level, pure Python DBus protocol wrapper." category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "jeepney-0.7.1-py3-none-any.whl", hash = "sha256:1b5a0ea5c0e7b166b2f5895b91a08c14de8915afda4407fb5022a195224958ac"}, - {file = "jeepney-0.7.1.tar.gz", hash = "sha256:fa9e232dfa0c498bd0b8a3a73b8d8a31978304dcef0515adc859d4e096f96f4f"}, + {file = "jeepney-0.8.0-py3-none-any.whl", hash = "sha256:c0a454ad016ca575060802ee4d590dd912e35c122fa04e70306de3d076cce755"}, + {file = "jeepney-0.8.0.tar.gz", hash = "sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806"}, ] [package.extras] -test = ["async-timeout", "pytest", "pytest-asyncio", "pytest-trio", "testpath", "trio"] +test = ["async-timeout", "pytest", "pytest-asyncio (>=0.17)", "pytest-trio", "testpath", "trio"] trio = ["async_generator", "trio"] [[package]] @@ -869,25 +1031,28 @@ files = [ [[package]] name = "keyring" -version = "23.5.0" +version = "23.13.1" description = "Store and access your passwords safely." 
category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "keyring-23.5.0-py3-none-any.whl", hash = "sha256:b0d28928ac3ec8e42ef4cc227822647a19f1d544f21f96457965dc01cf555261"}, - {file = "keyring-23.5.0.tar.gz", hash = "sha256:9012508e141a80bd1c0b6778d5c610dd9f8c464d75ac6774248500503f972fb9"}, + {file = "keyring-23.13.1-py3-none-any.whl", hash = "sha256:771ed2a91909389ed6148631de678f82ddc73737d85a927f382a8a1b157898cd"}, + {file = "keyring-23.13.1.tar.gz", hash = "sha256:ba2e15a9b35e21908d0aaf4e0a47acc52d6ae33444df0da2b49d41a46ef6d678"}, ] [package.dependencies] -importlib-metadata = ">=3.6" +importlib-metadata = {version = ">=4.11.4", markers = "python_version < \"3.12\""} +importlib-resources = {version = "*", markers = "python_version < \"3.9\""} +"jaraco.classes" = "*" jeepney = {version = ">=0.4.2", markers = "sys_platform == \"linux\""} -pywin32-ctypes = {version = "<0.1.0 || >0.1.0,<0.1.1 || >0.1.1", markers = "sys_platform == \"win32\""} +pywin32-ctypes = {version = ">=0.2.0", markers = "sys_platform == \"win32\""} SecretStorage = {version = ">=3.2", markers = "sys_platform == \"linux\""} [package.extras] -docs = ["jaraco.packaging (>=8.2)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx"] -testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"] +completion = ["shtab"] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"] +testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] [[package]] name = "ldap3" @@ -897,10 +1062,7 @@ category = "main" optional = true python-versions = "*" files = [ - {file = "ldap3-2.9.1-py2.6.egg", hash = "sha256:5ab7febc00689181375de40c396dcad4f2659cd260fc5e94c508b6d77c17e9d5"}, - {file = "ldap3-2.9.1-py2.7.egg", hash = "sha256:2bc966556fc4d4fa9f445a1c31dc484ee81d44a51ab0e2d0fd05b62cac75daa6"}, {file = "ldap3-2.9.1-py2.py3-none-any.whl", hash = "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70"}, - {file = "ldap3-2.9.1-py3.9.egg", hash = "sha256:5630d1383e09ba94839e253e013f1aa1a2cf7a547628ba1265cb7b9a844b5687"}, {file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"}, ] @@ -1000,54 +1162,90 @@ html5 = ["html5lib"] htmlsoup = ["BeautifulSoup4"] source = ["Cython (>=0.29.7)"] +[[package]] +name = "markdown-it-py" +version = "2.2.0" +description = "Python port of markdown-it. Markdown parsing, done right!" 
+category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "markdown-it-py-2.2.0.tar.gz", hash = "sha256:7c9a5e412688bc771c67432cbfebcdd686c93ce6484913dccf06cb5a0bea35a1"}, + {file = "markdown_it_py-2.2.0-py3-none-any.whl", hash = "sha256:5a35f8d1870171d9acc47b99612dc146129b631baf04970128b568f190d0cc30"}, +] + +[package.dependencies] +mdurl = ">=0.1,<1.0" +typing_extensions = {version = ">=3.7.4", markers = "python_version < \"3.8\""} + +[package.extras] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] +plugins = ["mdit-py-plugins"] +profiling = ["gprof2dot"] +rtd = ["attrs", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + [[package]] name = "markupsafe" -version = "2.1.0" +version = "2.1.2" description = "Safely add untrusted strings to HTML/XML markup." category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "MarkupSafe-2.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3028252424c72b2602a323f70fbf50aa80a5d3aa616ea6add4ba21ae9cc9da4c"}, - {file = "MarkupSafe-2.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:290b02bab3c9e216da57c1d11d2ba73a9f73a614bbdcc027d299a60cdfabb11a"}, - {file = "MarkupSafe-2.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e104c0c2b4cd765b4e83909cde7ec61a1e313f8a75775897db321450e928cce"}, - {file = "MarkupSafe-2.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24c3be29abb6b34052fd26fc7a8e0a49b1ee9d282e3665e8ad09a0a68faee5b3"}, - {file = "MarkupSafe-2.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:204730fd5fe2fe3b1e9ccadb2bd18ba8712b111dcabce185af0b3b5285a7c989"}, - {file = "MarkupSafe-2.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d3b64c65328cb4cd252c94f83e66e3d7acf8891e60ebf588d7b493a55a1dbf26"}, - {file = "MarkupSafe-2.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:96de1932237abe0a13ba68b63e94113678c379dca45afa040a17b6e1ad7ed076"}, - {file = "MarkupSafe-2.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:75bb36f134883fdbe13d8e63b8675f5f12b80bb6627f7714c7d6c5becf22719f"}, - {file = "MarkupSafe-2.1.0-cp310-cp310-win32.whl", hash = "sha256:4056f752015dfa9828dce3140dbadd543b555afb3252507348c493def166d454"}, - {file = "MarkupSafe-2.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:d4e702eea4a2903441f2735799d217f4ac1b55f7d8ad96ab7d4e25417cb0827c"}, - {file = "MarkupSafe-2.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f0eddfcabd6936558ec020130f932d479930581171368fd728efcfb6ef0dd357"}, - {file = "MarkupSafe-2.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ddea4c352a488b5e1069069f2f501006b1a4362cb906bee9a193ef1245a7a61"}, - {file = "MarkupSafe-2.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:09c86c9643cceb1d87ca08cdc30160d1b7ab49a8a21564868921959bd16441b8"}, - {file = "MarkupSafe-2.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0a0abef2ca47b33fb615b491ce31b055ef2430de52c5b3fb19a4042dbc5cadb"}, - {file = "MarkupSafe-2.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = 
"sha256:736895a020e31b428b3382a7887bfea96102c529530299f426bf2e636aacec9e"}, - {file = "MarkupSafe-2.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:679cbb78914ab212c49c67ba2c7396dc599a8479de51b9a87b174700abd9ea49"}, - {file = "MarkupSafe-2.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:84ad5e29bf8bab3ad70fd707d3c05524862bddc54dc040982b0dbcff36481de7"}, - {file = "MarkupSafe-2.1.0-cp37-cp37m-win32.whl", hash = "sha256:8da5924cb1f9064589767b0f3fc39d03e3d0fb5aa29e0cb21d43106519bd624a"}, - {file = "MarkupSafe-2.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:454ffc1cbb75227d15667c09f164a0099159da0c1f3d2636aa648f12675491ad"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:142119fb14a1ef6d758912b25c4e803c3ff66920635c44078666fe7cc3f8f759"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b2a5a856019d2833c56a3dcac1b80fe795c95f401818ea963594b345929dffa7"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d1fb9b2eec3c9714dd936860850300b51dbaa37404209c8d4cb66547884b7ed"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:62c0285e91414f5c8f621a17b69fc0088394ccdaa961ef469e833dbff64bd5ea"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fc3150f85e2dbcf99e65238c842d1cfe69d3e7649b19864c1cc043213d9cd730"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f02cf7221d5cd915d7fa58ab64f7ee6dd0f6cddbb48683debf5d04ae9b1c2cc1"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d5653619b3eb5cbd35bfba3c12d575db2a74d15e0e1c08bf1db788069d410ce8"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:7d2f5d97fcbd004c03df8d8fe2b973fe2b14e7bfeb2cfa012eaa8759ce9a762f"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-win32.whl", hash = "sha256:3cace1837bc84e63b3fd2dfce37f08f8c18aeb81ef5cf6bb9b51f625cb4e6cd8"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:fabbe18087c3d33c5824cb145ffca52eccd053061df1d79d4b66dafa5ad2a5ea"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:023af8c54fe63530545f70dd2a2a7eed18d07a9a77b94e8bf1e2ff7f252db9a3"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d66624f04de4af8bbf1c7f21cc06649c1c69a7f84109179add573ce35e46d448"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c532d5ab79be0199fa2658e24a02fce8542df196e60665dd322409a03db6a52c"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e67ec74fada3841b8c5f4c4f197bea916025cb9aa3fe5abf7d52b655d042f956"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30c653fde75a6e5eb814d2a0a89378f83d1d3f502ab710904ee585c38888816c"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:961eb86e5be7d0973789f30ebcf6caab60b844203f4396ece27310295a6082c7"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:598b65d74615c021423bd45c2bc5e9b59539c875a9bdb7e5f2a6b92dfcfc268d"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:599941da468f2cf22bf90a84f6e2a65524e87be2fce844f96f2dd9a6c9d1e635"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-win32.whl", hash = 
"sha256:e6f7f3f41faffaea6596da86ecc2389672fa949bd035251eab26dc6697451d05"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:b8811d48078d1cf2a6863dafb896e68406c5f513048451cd2ded0473133473c7"}, - {file = "MarkupSafe-2.1.0.tar.gz", hash = "sha256:80beaf63ddfbc64a0452b841d8036ca0611e049650e20afcb882f5d3c266d65f"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:665a36ae6f8f20a4676b53224e33d456a6f5a72657d9c83c2aa00765072f31f7"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:340bea174e9761308703ae988e982005aedf427de816d1afe98147668cc03036"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22152d00bf4a9c7c83960521fc558f55a1adbc0631fbb00a9471e097b19d72e1"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28057e985dace2f478e042eaa15606c7efccb700797660629da387eb289b9323"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca244fa73f50a800cf8c3ebf7fd93149ec37f5cb9596aa8873ae2c1d23498601"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d9d971ec1e79906046aa3ca266de79eac42f1dbf3612a05dc9368125952bd1a1"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7e007132af78ea9df29495dbf7b5824cb71648d7133cf7848a2a5dd00d36f9ff"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7313ce6a199651c4ed9d7e4cfb4aa56fe923b1adf9af3b420ee14e6d9a73df65"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-win32.whl", hash = "sha256:c4a549890a45f57f1ebf99c067a4ad0cb423a05544accaf2b065246827ed9603"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:835fb5e38fd89328e9c81067fd642b3593c33e1e17e2fdbf77f5676abb14a156"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2ec4f2d48ae59bbb9d1f9d7efb9236ab81429a764dedca114f5fdabbc3788013"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:608e7073dfa9e38a85d38474c082d4281f4ce276ac0010224eaba11e929dd53a"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65608c35bfb8a76763f37036547f7adfd09270fbdbf96608be2bead319728fcd"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2bfb563d0211ce16b63c7cb9395d2c682a23187f54c3d79bfec33e6705473c6"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:da25303d91526aac3672ee6d49a2f3db2d9502a4a60b55519feb1a4c7714e07d"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9cad97ab29dfc3f0249b483412c85c8ef4766d96cdf9dcf5a1e3caa3f3661cf1"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:085fd3201e7b12809f9e6e9bc1e5c96a368c8523fad5afb02afe3c051ae4afcc"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1bea30e9bf331f3fef67e0a3877b2288593c98a21ccb2cf29b74c581a4eb3af0"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-win32.whl", hash = "sha256:7df70907e00c970c60b9ef2938d894a9381f38e6b9db73c5be35e59d92e06625"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:e55e40ff0cc8cc5c07996915ad367fa47da6b3fc091fdadca7f5403239c5fec3"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:a6e40afa7f45939ca356f348c8e23048e02cb109ced1eb8420961b2f40fb373a"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf877ab4ed6e302ec1d04952ca358b381a882fbd9d1b07cccbfd61783561f98a"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63ba06c9941e46fa389d389644e2d8225e0e3e5ebcc4ff1ea8506dce646f8c8a"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f1cd098434e83e656abf198f103a8207a8187c0fc110306691a2e94a78d0abb2"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:55f44b440d491028addb3b88f72207d71eeebfb7b5dbf0643f7c023ae1fba619"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:a6f2fcca746e8d5910e18782f976489939d54a91f9411c32051b4aab2bd7c513"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0b462104ba25f1ac006fdab8b6a01ebbfbce9ed37fd37fd4acd70c67c973e460"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-win32.whl", hash = "sha256:7668b52e102d0ed87cb082380a7e2e1e78737ddecdde129acadb0eccc5423859"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6d6607f98fcf17e534162f0709aaad3ab7a96032723d8ac8750ffe17ae5a0666"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a806db027852538d2ad7555b203300173dd1b77ba116de92da9afbc3a3be3eed"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a4abaec6ca3ad8660690236d11bfe28dfd707778e2442b45addd2f086d6ef094"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f03a532d7dee1bed20bc4884194a16160a2de9ffc6354b3878ec9682bb623c54"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4cf06cdc1dda95223e9d2d3c58d3b178aa5dacb35ee7e3bbac10e4e1faacb419"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22731d79ed2eb25059ae3df1dfc9cb1546691cc41f4e3130fe6bfbc3ecbbecfa"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f8ffb705ffcf5ddd0e80b65ddf7bed7ee4f5a441ea7d3419e861a12eaf41af58"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8db032bf0ce9022a8e41a22598eefc802314e81b879ae093f36ce9ddf39ab1ba"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2298c859cfc5463f1b64bd55cb3e602528db6fa0f3cfd568d3605c50678f8f03"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-win32.whl", hash = "sha256:50c42830a633fa0cf9e7d27664637532791bfc31c731a87b202d2d8ac40c3ea2"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:bb06feb762bade6bf3c8b844462274db0c76acc95c52abe8dbed28ae3d44a147"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:99625a92da8229df6d44335e6fcc558a5037dd0a760e11d84be2260e6f37002f"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8bca7e26c1dd751236cfb0c6c72d4ad61d986e9a41bbf76cb445f69488b2a2bd"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40627dcf047dadb22cd25ea7ecfe9cbf3bbbad0482ee5920b582f3809c97654f"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40dfd3fefbef579ee058f139733ac336312663c6706d1163b82b3003fb1925c4"}, + {file = 
"MarkupSafe-2.1.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:090376d812fb6ac5f171e5938e82e7f2d7adc2b629101cec0db8b267815c85e2"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2e7821bffe00aa6bd07a23913b7f4e01328c3d5cc0b40b36c0bd81d362faeb65"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:c0a33bc9f02c2b17c3ea382f91b4db0e6cde90b63b296422a939886a7a80de1c"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b8526c6d437855442cdd3d87eede9c425c4445ea011ca38d937db299382e6fa3"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-win32.whl", hash = "sha256:137678c63c977754abe9086a3ec011e8fd985ab90631145dfb9294ad09c102a7"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:0576fe974b40a400449768941d5d0858cc624e3249dfd1e0c33674e5c7ca7aed"}, + {file = "MarkupSafe-2.1.2.tar.gz", hash = "sha256:abcabc8c2b26036d62d4c746381a6f7cf60aafcc653198ad678306986b09450d"}, ] [[package]] @@ -1090,100 +1288,137 @@ Twisted = ">=15.1.0" [package.extras] dev = ["black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "ldaptor", "matrix-synapse", "mypy (==0.910)", "tox", "types-setuptools"] +[[package]] +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, +] + +[[package]] +name = "more-itertools" +version = "9.1.0" +description = "More routines for operating on iterables, beyond itertools" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "more-itertools-9.1.0.tar.gz", hash = "sha256:cabaa341ad0389ea83c17a94566a53ae4c9d07349861ecb14dc6d0345cf9ac5d"}, + {file = "more_itertools-9.1.0-py3-none-any.whl", hash = "sha256:d2bc7f02446e86a68911e58ded76d6561eea00cddfb2a91e7019bbb586c799f3"}, +] + [[package]] name = "msgpack" -version = "1.0.4" +version = "1.0.5" description = "MessagePack serializer" category = "main" optional = false python-versions = "*" files = [ - {file = "msgpack-1.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4ab251d229d10498e9a2f3b1e68ef64cb393394ec477e3370c457f9430ce9250"}, - {file = "msgpack-1.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:112b0f93202d7c0fef0b7810d465fde23c746a2d482e1e2de2aafd2ce1492c88"}, - {file = "msgpack-1.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:002b5c72b6cd9b4bafd790f364b8480e859b4712e91f43014fe01e4f957b8467"}, - {file = "msgpack-1.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35bc0faa494b0f1d851fd29129b2575b2e26d41d177caacd4206d81502d4c6a6"}, - {file = "msgpack-1.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4733359808c56d5d7756628736061c432ded018e7a1dff2d35a02439043321aa"}, - {file = "msgpack-1.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb514ad14edf07a1dbe63761fd30f89ae79b42625731e1ccf5e1f1092950eaa6"}, - {file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c23080fdeec4716aede32b4e0ef7e213c7b1093eede9ee010949f2a418ced6ba"}, - {file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = 
"sha256:49565b0e3d7896d9ea71d9095df15b7f75a035c49be733051c34762ca95bbf7e"}, - {file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:aca0f1644d6b5a73eb3e74d4d64d5d8c6c3d577e753a04c9e9c87d07692c58db"}, - {file = "msgpack-1.0.4-cp310-cp310-win32.whl", hash = "sha256:0dfe3947db5fb9ce52aaea6ca28112a170db9eae75adf9339a1aec434dc954ef"}, - {file = "msgpack-1.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dea20515f660aa6b7e964433b1808d098dcfcabbebeaaad240d11f909298075"}, - {file = "msgpack-1.0.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e83f80a7fec1a62cf4e6c9a660e39c7f878f603737a0cdac8c13131d11d97f52"}, - {file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c11a48cf5e59026ad7cb0dc29e29a01b5a66a3e333dc11c04f7e991fc5510a9"}, - {file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1276e8f34e139aeff1c77a3cefb295598b504ac5314d32c8c3d54d24fadb94c9"}, - {file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6c9566f2c39ccced0a38d37c26cc3570983b97833c365a6044edef3574a00c08"}, - {file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:fcb8a47f43acc113e24e910399376f7277cf8508b27e5b88499f053de6b115a8"}, - {file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:76ee788122de3a68a02ed6f3a16bbcd97bc7c2e39bd4d94be2f1821e7c4a64e6"}, - {file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:0a68d3ac0104e2d3510de90a1091720157c319ceeb90d74f7b5295a6bee51bae"}, - {file = "msgpack-1.0.4-cp36-cp36m-win32.whl", hash = "sha256:85f279d88d8e833ec015650fd15ae5eddce0791e1e8a59165318f371158efec6"}, - {file = "msgpack-1.0.4-cp36-cp36m-win_amd64.whl", hash = "sha256:c1683841cd4fa45ac427c18854c3ec3cd9b681694caf5bff04edb9387602d661"}, - {file = "msgpack-1.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a75dfb03f8b06f4ab093dafe3ddcc2d633259e6c3f74bb1b01996f5d8aa5868c"}, - {file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9667bdfdf523c40d2511f0e98a6c9d3603be6b371ae9a238b7ef2dc4e7a427b0"}, - {file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11184bc7e56fd74c00ead4f9cc9a3091d62ecb96e97653add7a879a14b003227"}, - {file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ac5bd7901487c4a1dd51a8c58f2632b15d838d07ceedaa5e4c080f7190925bff"}, - {file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1e91d641d2bfe91ba4c52039adc5bccf27c335356055825c7f88742c8bb900dd"}, - {file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2a2df1b55a78eb5f5b7d2a4bb221cd8363913830145fad05374a80bf0877cb1e"}, - {file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:545e3cf0cf74f3e48b470f68ed19551ae6f9722814ea969305794645da091236"}, - {file = "msgpack-1.0.4-cp37-cp37m-win32.whl", hash = "sha256:2cc5ca2712ac0003bcb625c96368fd08a0f86bbc1a5578802512d87bc592fe44"}, - {file = "msgpack-1.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:eba96145051ccec0ec86611fe9cf693ce55f2a3ce89c06ed307de0e085730ec1"}, - {file = "msgpack-1.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:7760f85956c415578c17edb39eed99f9181a48375b0d4a94076d84148cf67b2d"}, - {file = "msgpack-1.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:449e57cc1ff18d3b444eb554e44613cffcccb32805d16726a5494038c3b93dab"}, - 
{file = "msgpack-1.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d603de2b8d2ea3f3bcb2efe286849aa7a81531abc52d8454da12f46235092bcb"}, - {file = "msgpack-1.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f5d88c99f64c456413d74a975bd605a9b0526293218a3b77220a2c15458ba9"}, - {file = "msgpack-1.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6916c78f33602ecf0509cc40379271ba0f9ab572b066bd4bdafd7434dee4bc6e"}, - {file = "msgpack-1.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:81fc7ba725464651190b196f3cd848e8553d4d510114a954681fd0b9c479d7e1"}, - {file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d5b5b962221fa2c5d3a7f8133f9abffc114fe218eb4365e40f17732ade576c8e"}, - {file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:77ccd2af37f3db0ea59fb280fa2165bf1b096510ba9fe0cc2bf8fa92a22fdb43"}, - {file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b17be2478b622939e39b816e0aa8242611cc8d3583d1cd8ec31b249f04623243"}, - {file = "msgpack-1.0.4-cp38-cp38-win32.whl", hash = "sha256:2bb8cdf50dd623392fa75525cce44a65a12a00c98e1e37bf0fb08ddce2ff60d2"}, - {file = "msgpack-1.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:26b8feaca40a90cbe031b03d82b2898bf560027160d3eae1423f4a67654ec5d6"}, - {file = "msgpack-1.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:462497af5fd4e0edbb1559c352ad84f6c577ffbbb708566a0abaaa84acd9f3ae"}, - {file = "msgpack-1.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2999623886c5c02deefe156e8f869c3b0aaeba14bfc50aa2486a0415178fce55"}, - {file = "msgpack-1.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f0029245c51fd9473dc1aede1160b0a29f4a912e6b1dd353fa6d317085b219da"}, - {file = "msgpack-1.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed6f7b854a823ea44cf94919ba3f727e230da29feb4a99711433f25800cf747f"}, - {file = "msgpack-1.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0df96d6eaf45ceca04b3f3b4b111b86b33785683d682c655063ef8057d61fd92"}, - {file = "msgpack-1.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a4192b1ab40f8dca3f2877b70e63799d95c62c068c84dc028b40a6cb03ccd0f"}, - {file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0e3590f9fb9f7fbc36df366267870e77269c03172d086fa76bb4eba8b2b46624"}, - {file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:1576bd97527a93c44fa856770197dec00d223b0b9f36ef03f65bac60197cedf8"}, - {file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:63e29d6e8c9ca22b21846234913c3466b7e4ee6e422f205a2988083de3b08cae"}, - {file = "msgpack-1.0.4-cp39-cp39-win32.whl", hash = "sha256:fb62ea4b62bfcb0b380d5680f9a4b3f9a2d166d9394e9bbd9666c0ee09a3645c"}, - {file = "msgpack-1.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:4d5834a2a48965a349da1c5a79760d94a1a0172fbb5ab6b5b33cbf8447e109ce"}, - {file = "msgpack-1.0.4.tar.gz", hash = "sha256:f5d869c18f030202eb412f08b28d2afeea553d6613aee89e200d7aca7ef01f5f"}, + {file = "msgpack-1.0.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:525228efd79bb831cf6830a732e2e80bc1b05436b086d4264814b4b2955b2fa9"}, + {file = "msgpack-1.0.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4f8d8b3bf1ff2672567d6b5c725a1b347fe838b912772aa8ae2bf70338d5a198"}, + {file = "msgpack-1.0.5-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:cdc793c50be3f01106245a61b739328f7dccc2c648b501e237f0699fe1395b81"}, + {file = "msgpack-1.0.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cb47c21a8a65b165ce29f2bec852790cbc04936f502966768e4aae9fa763cb7"}, + {file = "msgpack-1.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e42b9594cc3bf4d838d67d6ed62b9e59e201862a25e9a157019e171fbe672dd3"}, + {file = "msgpack-1.0.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:55b56a24893105dc52c1253649b60f475f36b3aa0fc66115bffafb624d7cb30b"}, + {file = "msgpack-1.0.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:1967f6129fc50a43bfe0951c35acbb729be89a55d849fab7686004da85103f1c"}, + {file = "msgpack-1.0.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:20a97bf595a232c3ee6d57ddaadd5453d174a52594bf9c21d10407e2a2d9b3bd"}, + {file = "msgpack-1.0.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d25dd59bbbbb996eacf7be6b4ad082ed7eacc4e8f3d2df1ba43822da9bfa122a"}, + {file = "msgpack-1.0.5-cp310-cp310-win32.whl", hash = "sha256:382b2c77589331f2cb80b67cc058c00f225e19827dbc818d700f61513ab47bea"}, + {file = "msgpack-1.0.5-cp310-cp310-win_amd64.whl", hash = "sha256:4867aa2df9e2a5fa5f76d7d5565d25ec76e84c106b55509e78c1ede0f152659a"}, + {file = "msgpack-1.0.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9f5ae84c5c8a857ec44dc180a8b0cc08238e021f57abdf51a8182e915e6299f0"}, + {file = "msgpack-1.0.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9e6ca5d5699bcd89ae605c150aee83b5321f2115695e741b99618f4856c50898"}, + {file = "msgpack-1.0.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5494ea30d517a3576749cad32fa27f7585c65f5f38309c88c6d137877fa28a5a"}, + {file = "msgpack-1.0.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ab2f3331cb1b54165976a9d976cb251a83183631c88076613c6c780f0d6e45a"}, + {file = "msgpack-1.0.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28592e20bbb1620848256ebc105fc420436af59515793ed27d5c77a217477705"}, + {file = "msgpack-1.0.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe5c63197c55bce6385d9aee16c4d0641684628f63ace85f73571e65ad1c1e8d"}, + {file = "msgpack-1.0.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ed40e926fa2f297e8a653c954b732f125ef97bdd4c889f243182299de27e2aa9"}, + {file = "msgpack-1.0.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b2de4c1c0538dcb7010902a2b97f4e00fc4ddf2c8cda9749af0e594d3b7fa3d7"}, + {file = "msgpack-1.0.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bf22a83f973b50f9d38e55c6aade04c41ddda19b00c4ebc558930d78eecc64ed"}, + {file = "msgpack-1.0.5-cp311-cp311-win32.whl", hash = "sha256:c396e2cc213d12ce017b686e0f53497f94f8ba2b24799c25d913d46c08ec422c"}, + {file = "msgpack-1.0.5-cp311-cp311-win_amd64.whl", hash = "sha256:6c4c68d87497f66f96d50142a2b73b97972130d93677ce930718f68828b382e2"}, + {file = "msgpack-1.0.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:a2b031c2e9b9af485d5e3c4520f4220d74f4d222a5b8dc8c1a3ab9448ca79c57"}, + {file = "msgpack-1.0.5-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f837b93669ce4336e24d08286c38761132bc7ab29782727f8557e1eb21b2080"}, + {file = "msgpack-1.0.5-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1d46dfe3832660f53b13b925d4e0fa1432b00f5f7210eb3ad3bb9a13c6204a6"}, + {file = 
"msgpack-1.0.5-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:366c9a7b9057e1547f4ad51d8facad8b406bab69c7d72c0eb6f529cf76d4b85f"}, + {file = "msgpack-1.0.5-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:4c075728a1095efd0634a7dccb06204919a2f67d1893b6aa8e00497258bf926c"}, + {file = "msgpack-1.0.5-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:f933bbda5a3ee63b8834179096923b094b76f0c7a73c1cfe8f07ad608c58844b"}, + {file = "msgpack-1.0.5-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:36961b0568c36027c76e2ae3ca1132e35123dcec0706c4b7992683cc26c1320c"}, + {file = "msgpack-1.0.5-cp36-cp36m-win32.whl", hash = "sha256:b5ef2f015b95f912c2fcab19c36814963b5463f1fb9049846994b007962743e9"}, + {file = "msgpack-1.0.5-cp36-cp36m-win_amd64.whl", hash = "sha256:288e32b47e67f7b171f86b030e527e302c91bd3f40fd9033483f2cacc37f327a"}, + {file = "msgpack-1.0.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:137850656634abddfb88236008339fdaba3178f4751b28f270d2ebe77a563b6c"}, + {file = "msgpack-1.0.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0c05a4a96585525916b109bb85f8cb6511db1c6f5b9d9cbcbc940dc6b4be944b"}, + {file = "msgpack-1.0.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56a62ec00b636583e5cb6ad313bbed36bb7ead5fa3a3e38938503142c72cba4f"}, + {file = "msgpack-1.0.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef8108f8dedf204bb7b42994abf93882da1159728a2d4c5e82012edd92c9da9f"}, + {file = "msgpack-1.0.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1835c84d65f46900920b3708f5ba829fb19b1096c1800ad60bae8418652a951d"}, + {file = "msgpack-1.0.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:e57916ef1bd0fee4f21c4600e9d1da352d8816b52a599c46460e93a6e9f17086"}, + {file = "msgpack-1.0.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:17358523b85973e5f242ad74aa4712b7ee560715562554aa2134d96e7aa4cbbf"}, + {file = "msgpack-1.0.5-cp37-cp37m-win32.whl", hash = "sha256:cb5aaa8c17760909ec6cb15e744c3ebc2ca8918e727216e79607b7bbce9c8f77"}, + {file = "msgpack-1.0.5-cp37-cp37m-win_amd64.whl", hash = "sha256:ab31e908d8424d55601ad7075e471b7d0140d4d3dd3272daf39c5c19d936bd82"}, + {file = "msgpack-1.0.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b72d0698f86e8d9ddf9442bdedec15b71df3598199ba33322d9711a19f08145c"}, + {file = "msgpack-1.0.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:379026812e49258016dd84ad79ac8446922234d498058ae1d415f04b522d5b2d"}, + {file = "msgpack-1.0.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:332360ff25469c346a1c5e47cbe2a725517919892eda5cfaffe6046656f0b7bb"}, + {file = "msgpack-1.0.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:476a8fe8fae289fdf273d6d2a6cb6e35b5a58541693e8f9f019bfe990a51e4ba"}, + {file = "msgpack-1.0.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9985b214f33311df47e274eb788a5893a761d025e2b92c723ba4c63936b69b1"}, + {file = "msgpack-1.0.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48296af57cdb1d885843afd73c4656be5c76c0c6328db3440c9601a98f303d87"}, + {file = "msgpack-1.0.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:addab7e2e1fcc04bd08e4eb631c2a90960c340e40dfc4a5e24d2ff0d5a3b3edb"}, + {file = "msgpack-1.0.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:916723458c25dfb77ff07f4c66aed34e47503b2eb3188b3adbec8d8aa6e00f48"}, + {file = 
"msgpack-1.0.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:821c7e677cc6acf0fd3f7ac664c98803827ae6de594a9f99563e48c5a2f27eb0"}, + {file = "msgpack-1.0.5-cp38-cp38-win32.whl", hash = "sha256:1c0f7c47f0087ffda62961d425e4407961a7ffd2aa004c81b9c07d9269512f6e"}, + {file = "msgpack-1.0.5-cp38-cp38-win_amd64.whl", hash = "sha256:bae7de2026cbfe3782c8b78b0db9cbfc5455e079f1937cb0ab8d133496ac55e1"}, + {file = "msgpack-1.0.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:20c784e66b613c7f16f632e7b5e8a1651aa5702463d61394671ba07b2fc9e025"}, + {file = "msgpack-1.0.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:266fa4202c0eb94d26822d9bfd7af25d1e2c088927fe8de9033d929dd5ba24c5"}, + {file = "msgpack-1.0.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:18334484eafc2b1aa47a6d42427da7fa8f2ab3d60b674120bce7a895a0a85bdd"}, + {file = "msgpack-1.0.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57e1f3528bd95cc44684beda696f74d3aaa8a5e58c816214b9046512240ef437"}, + {file = "msgpack-1.0.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:586d0d636f9a628ddc6a17bfd45aa5b5efaf1606d2b60fa5d87b8986326e933f"}, + {file = "msgpack-1.0.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a740fa0e4087a734455f0fc3abf5e746004c9da72fbd541e9b113013c8dc3282"}, + {file = "msgpack-1.0.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3055b0455e45810820db1f29d900bf39466df96ddca11dfa6d074fa47054376d"}, + {file = "msgpack-1.0.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a61215eac016f391129a013c9e46f3ab308db5f5ec9f25811e811f96962599a8"}, + {file = "msgpack-1.0.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:362d9655cd369b08fda06b6657a303eb7172d5279997abe094512e919cf74b11"}, + {file = "msgpack-1.0.5-cp39-cp39-win32.whl", hash = "sha256:ac9dd47af78cae935901a9a500104e2dea2e253207c924cc95de149606dc43cc"}, + {file = "msgpack-1.0.5-cp39-cp39-win_amd64.whl", hash = "sha256:06f5174b5f8ed0ed919da0e62cbd4ffde676a374aba4020034da05fab67b9164"}, + {file = "msgpack-1.0.5.tar.gz", hash = "sha256:c075544284eadc5cddc70f4757331d99dcbc16b2bbd4849d15f8aae4cf36d31c"}, ] [[package]] name = "mypy" -version = "0.981" +version = "1.0.1" description = "Optional static typing for Python" category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "mypy-0.981-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4bc460e43b7785f78862dab78674e62ec3cd523485baecfdf81a555ed29ecfa0"}, - {file = "mypy-0.981-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:756fad8b263b3ba39e4e204ee53042671b660c36c9017412b43af210ddee7b08"}, - {file = "mypy-0.981-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a16a0145d6d7d00fbede2da3a3096dcc9ecea091adfa8da48fa6a7b75d35562d"}, - {file = "mypy-0.981-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce65f70b14a21fdac84c294cde75e6dbdabbcff22975335e20827b3b94bdbf49"}, - {file = "mypy-0.981-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6e35d764784b42c3e256848fb8ed1d4292c9fc0098413adb28d84974c095b279"}, - {file = "mypy-0.981-cp310-cp310-win_amd64.whl", hash = "sha256:e53773073c864d5f5cec7f3fc72fbbcef65410cde8cc18d4f7242dea60dac52e"}, - {file = "mypy-0.981-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6ee196b1d10b8b215e835f438e06965d7a480f6fe016eddbc285f13955cca659"}, - {file = "mypy-0.981-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ad21d4c9d3673726cf986ea1d0c9fb66905258709550ddf7944c8f885f208be"}, - {file = 
"mypy-0.981-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d1debb09043e1f5ee845fa1e96d180e89115b30e47c5d3ce53bc967bab53f62d"}, - {file = "mypy-0.981-cp37-cp37m-win_amd64.whl", hash = "sha256:9f362470a3480165c4c6151786b5379351b790d56952005be18bdbdd4c7ce0ae"}, - {file = "mypy-0.981-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c9e0efb95ed6ca1654951bd5ec2f3fa91b295d78bf6527e026529d4aaa1e0c30"}, - {file = "mypy-0.981-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e178eaffc3c5cd211a87965c8c0df6da91ed7d258b5fc72b8e047c3771317ddb"}, - {file = "mypy-0.981-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:06e1eac8d99bd404ed8dd34ca29673c4346e76dd8e612ea507763dccd7e13c7a"}, - {file = "mypy-0.981-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa38f82f53e1e7beb45557ff167c177802ba7b387ad017eab1663d567017c8ee"}, - {file = "mypy-0.981-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:64e1f6af81c003f85f0dfed52db632817dabb51b65c0318ffbf5ff51995bbb08"}, - {file = "mypy-0.981-cp38-cp38-win_amd64.whl", hash = "sha256:e1acf62a8c4f7c092462c738aa2c2489e275ed386320c10b2e9bff31f6f7e8d6"}, - {file = "mypy-0.981-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b6ede64e52257931315826fdbfc6ea878d89a965580d1a65638ef77cb551f56d"}, - {file = "mypy-0.981-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eb3978b191b9fa0488524bb4ffedf2c573340e8c2b4206fc191d44c7093abfb7"}, - {file = "mypy-0.981-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:77f8fcf7b4b3cc0c74fb33ae54a4cd00bb854d65645c48beccf65fa10b17882c"}, - {file = "mypy-0.981-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f64d2ce043a209a297df322eb4054dfbaa9de9e8738291706eaafda81ab2b362"}, - {file = "mypy-0.981-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2ee3dbc53d4df7e6e3b1c68ac6a971d3a4fb2852bf10a05fda228721dd44fae1"}, - {file = "mypy-0.981-cp39-cp39-win_amd64.whl", hash = "sha256:8e8e49aa9cc23aa4c926dc200ce32959d3501c4905147a66ce032f05cb5ecb92"}, - {file = "mypy-0.981-py3-none-any.whl", hash = "sha256:794f385653e2b749387a42afb1e14c2135e18daeb027e0d97162e4b7031210f8"}, - {file = "mypy-0.981.tar.gz", hash = "sha256:ad77c13037d3402fbeffda07d51e3f228ba078d1c7096a73759c9419ea031bf4"}, + {file = "mypy-1.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:71a808334d3f41ef011faa5a5cd8153606df5fc0b56de5b2e89566c8093a0c9a"}, + {file = "mypy-1.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:920169f0184215eef19294fa86ea49ffd4635dedfdea2b57e45cb4ee85d5ccaf"}, + {file = "mypy-1.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27a0f74a298769d9fdc8498fcb4f2beb86f0564bcdb1a37b58cbbe78e55cf8c0"}, + {file = "mypy-1.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:65b122a993d9c81ea0bfde7689b3365318a88bde952e4dfa1b3a8b4ac05d168b"}, + {file = "mypy-1.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:5deb252fd42a77add936b463033a59b8e48eb2eaec2976d76b6878d031933fe4"}, + {file = "mypy-1.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2013226d17f20468f34feddd6aae4635a55f79626549099354ce641bc7d40262"}, + {file = "mypy-1.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:48525aec92b47baed9b3380371ab8ab6e63a5aab317347dfe9e55e02aaad22e8"}, + {file = "mypy-1.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c96b8a0c019fe29040d520d9257d8c8f122a7343a8307bf8d6d4a43f5c5bfcc8"}, + {file = "mypy-1.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:448de661536d270ce04f2d7dddaa49b2fdba6e3bd8a83212164d4174ff43aa65"}, + {file = 
"mypy-1.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:d42a98e76070a365a1d1c220fcac8aa4ada12ae0db679cb4d910fabefc88b994"}, + {file = "mypy-1.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e64f48c6176e243ad015e995de05af7f22bbe370dbb5b32bd6988438ec873919"}, + {file = "mypy-1.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fdd63e4f50e3538617887e9aee91855368d9fc1dea30da743837b0df7373bc4"}, + {file = "mypy-1.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:dbeb24514c4acbc78d205f85dd0e800f34062efcc1f4a4857c57e4b4b8712bff"}, + {file = "mypy-1.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a2948c40a7dd46c1c33765718936669dc1f628f134013b02ff5ac6c7ef6942bf"}, + {file = "mypy-1.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5bc8d6bd3b274dd3846597855d96d38d947aedba18776aa998a8d46fabdaed76"}, + {file = "mypy-1.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:17455cda53eeee0a4adb6371a21dd3dbf465897de82843751cf822605d152c8c"}, + {file = "mypy-1.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e831662208055b006eef68392a768ff83596035ffd6d846786578ba1714ba8f6"}, + {file = "mypy-1.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e60d0b09f62ae97a94605c3f73fd952395286cf3e3b9e7b97f60b01ddfbbda88"}, + {file = "mypy-1.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:0af4f0e20706aadf4e6f8f8dc5ab739089146b83fd53cb4a7e0e850ef3de0bb6"}, + {file = "mypy-1.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:24189f23dc66f83b839bd1cce2dfc356020dfc9a8bae03978477b15be61b062e"}, + {file = "mypy-1.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93a85495fb13dc484251b4c1fd7a5ac370cd0d812bbfc3b39c1bafefe95275d5"}, + {file = "mypy-1.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f546ac34093c6ce33f6278f7c88f0f147a4849386d3bf3ae193702f4fe31407"}, + {file = "mypy-1.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c6c2ccb7af7154673c591189c3687b013122c5a891bb5651eca3db8e6c6c55bd"}, + {file = "mypy-1.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:15b5a824b58c7c822c51bc66308e759243c32631896743f030daf449fe3677f3"}, + {file = "mypy-1.0.1-py3-none-any.whl", hash = "sha256:eda5c8b9949ed411ff752b9a01adda31afe7eae1e53e946dbdf9db23865e66c4"}, + {file = "mypy-1.0.1.tar.gz", hash = "sha256:28cea5a6392bb43d266782983b5a4216c25544cd7d80be681a155ddcdafd152d"}, ] [package.dependencies] @@ -1194,35 +1429,36 @@ typing-extensions = ">=3.10" [package.extras] dmypy = ["psutil (>=4.0)"] +install-types = ["pip"] python2 = ["typed-ast (>=1.4.0,<2)"] reports = ["lxml"] [[package]] name = "mypy-extensions" -version = "0.4.3" -description = "Experimental type system extensions for programs checked with the mypy typechecker." +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." 
category = "dev" optional = false -python-versions = "*" +python-versions = ">=3.5" files = [ - {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"}, - {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"}, + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, ] [[package]] name = "mypy-zope" -version = "0.3.11" +version = "0.9.1" description = "Plugin for mypy to support zope interfaces" category = "dev" optional = false python-versions = "*" files = [ - {file = "mypy-zope-0.3.11.tar.gz", hash = "sha256:d4255f9f04d48c79083bbd4e2fea06513a6ac7b8de06f8c4ce563fd85142ca05"}, - {file = "mypy_zope-0.3.11-py3-none-any.whl", hash = "sha256:ec080a6508d1f7805c8d2054f9fdd13c849742ce96803519e1fdfa3d3cab7140"}, + {file = "mypy-zope-0.9.1.tar.gz", hash = "sha256:4c87dbc71fec35f6533746ecdf9d400cd9281338d71c16b5676bb5ed00a97ca2"}, + {file = "mypy_zope-0.9.1-py3-none-any.whl", hash = "sha256:733d4399affe9e61e332ce9c4049418d6775c39b473e4b9f409d51c207c1b71a"}, ] [package.dependencies] -mypy = "0.981" +mypy = ">=1.0.0,<1.1.0" "zope.interface" = "*" "zope.schema" = "*" @@ -1257,26 +1493,26 @@ tests = ["Sphinx", "doubles", "flake8", "flake8-quotes", "gevent", "mock", "pyte [[package]] name = "packaging" -version = "22.0" +version = "23.0" description = "Core utilities for Python packages" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "packaging-22.0-py3-none-any.whl", hash = "sha256:957e2148ba0e1a3b282772e791ef1d8083648bc131c8ab0c1feba110ce1146c3"}, - {file = "packaging-22.0.tar.gz", hash = "sha256:2198ec20bd4c017b8f9717e00f0c8714076fc2fd93816750ab48e2c41de2cfd3"}, + {file = "packaging-23.0-py3-none-any.whl", hash = "sha256:714ac14496c3e68c99c29b00845f7a2b85f3bb6f1078fd9f72fd20f0570002b2"}, + {file = "packaging-23.0.tar.gz", hash = "sha256:b6ad297f8907de0fa2fe1ccbd26fdaf387f5f47c7275fedf8cce89f99446cf97"}, ] [[package]] name = "parameterized" -version = "0.8.1" +version = "0.9.0" description = "Parameterized testing with any Python test framework" category = "main" optional = false -python-versions = "*" +python-versions = ">=3.7" files = [ - {file = "parameterized-0.8.1-py2.py3-none-any.whl", hash = "sha256:9cbb0b69a03e8695d68b3399a8a5825200976536fe1cb79db60ed6a4c8c9efe9"}, - {file = "parameterized-0.8.1.tar.gz", hash = "sha256:41bbff37d6186430f77f900d777e5bb6a24928a1c46fb1de692f8b52b8833b5c"}, + {file = "parameterized-0.9.0-py2.py3-none-any.whl", hash = "sha256:4e0758e3d41bea3bbd05ec14fc2c24736723f243b28d702081aef438c9372b1b"}, + {file = "parameterized-0.9.0.tar.gz", hash = "sha256:7fc905272cefa4f364c1a3429cbbe9c0f98b793988efb5bf90aac80f08db09b1"}, ] [package.extras] @@ -1284,136 +1520,125 @@ dev = ["jinja2"] [[package]] name = "pathspec" -version = "0.9.0" +version = "0.11.1" description = "Utility library for gitignore style pattern matching of file paths." 
category = "dev" optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" +python-versions = ">=3.7" files = [ - {file = "pathspec-0.9.0-py2.py3-none-any.whl", hash = "sha256:7d15c4ddb0b5c802d161efc417ec1a2558ea2653c2e8ad9c19098201dc1c993a"}, - {file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"}, + {file = "pathspec-0.11.1-py3-none-any.whl", hash = "sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293"}, + {file = "pathspec-0.11.1.tar.gz", hash = "sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687"}, ] [[package]] name = "phonenumbers" -version = "8.13.2" +version = "8.13.9" description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." category = "main" optional = false python-versions = "*" files = [ - {file = "phonenumbers-8.13.2-py2.py3-none-any.whl", hash = "sha256:884b26f775205261f4dc861371dce217c1661a4942fb3ec3624e290fb51869bf"}, - {file = "phonenumbers-8.13.2.tar.gz", hash = "sha256:0179f688d48c0e7e161eb7b9d86d587940af1f5174f97c1fdfd893c599c0d94a"}, + {file = "phonenumbers-8.13.9-py2.py3-none-any.whl", hash = "sha256:7706e490154551829110a158a94e3efdbd86b1d8607b7bb1e8d1cf27637db798"}, + {file = "phonenumbers-8.13.9.tar.gz", hash = "sha256:d2afb9f23b3fb99cd4dfce8b1e9005a66bb546470128a6ab5c1ac353463b2c8b"}, ] [[package]] name = "pillow" -version = "9.4.0" +version = "9.5.0" description = "Python Imaging Library (Fork)" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "Pillow-9.4.0-1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1b4b4e9dda4f4e4c4e6896f93e84a8f0bcca3b059de9ddf67dac3c334b1195e1"}, - {file = "Pillow-9.4.0-1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:fb5c1ad6bad98c57482236a21bf985ab0ef42bd51f7ad4e4538e89a997624e12"}, - {file = "Pillow-9.4.0-1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:f0caf4a5dcf610d96c3bd32932bfac8aee61c96e60481c2a0ea58da435e25acd"}, - {file = "Pillow-9.4.0-1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:3f4cc516e0b264c8d4ccd6b6cbc69a07c6d582d8337df79be1e15a5056b258c9"}, - {file = "Pillow-9.4.0-1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:b8c2f6eb0df979ee99433d8b3f6d193d9590f735cf12274c108bd954e30ca858"}, - {file = "Pillow-9.4.0-1-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b70756ec9417c34e097f987b4d8c510975216ad26ba6e57ccb53bc758f490dab"}, - {file = "Pillow-9.4.0-1-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:43521ce2c4b865d385e78579a082b6ad1166ebed2b1a2293c3be1d68dd7ca3b9"}, - {file = "Pillow-9.4.0-2-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:9d9a62576b68cd90f7075876f4e8444487db5eeea0e4df3ba298ee38a8d067b0"}, - {file = "Pillow-9.4.0-2-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:87708d78a14d56a990fbf4f9cb350b7d89ee8988705e58e39bdf4d82c149210f"}, - {file = "Pillow-9.4.0-2-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:8a2b5874d17e72dfb80d917213abd55d7e1ed2479f38f001f264f7ce7bae757c"}, - {file = "Pillow-9.4.0-2-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:83125753a60cfc8c412de5896d10a0a405e0bd88d0470ad82e0869ddf0cb3848"}, - {file = "Pillow-9.4.0-2-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:9e5f94742033898bfe84c93c831a6f552bb629448d4072dd312306bab3bd96f1"}, - {file = "Pillow-9.4.0-2-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:013016af6b3a12a2f40b704677f8b51f72cb007dac785a9933d5c86a72a7fe33"}, - {file = 
"Pillow-9.4.0-2-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:99d92d148dd03fd19d16175b6d355cc1b01faf80dae93c6c3eb4163709edc0a9"}, - {file = "Pillow-9.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:2968c58feca624bb6c8502f9564dd187d0e1389964898f5e9e1fbc8533169157"}, - {file = "Pillow-9.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c5c1362c14aee73f50143d74389b2c158707b4abce2cb055b7ad37ce60738d47"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd752c5ff1b4a870b7661234694f24b1d2b9076b8bf337321a814c612665f343"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a3049a10261d7f2b6514d35bbb7a4dfc3ece4c4de14ef5876c4b7a23a0e566d"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16a8df99701f9095bea8a6c4b3197da105df6f74e6176c5b410bc2df2fd29a57"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:94cdff45173b1919350601f82d61365e792895e3c3a3443cf99819e6fbf717a5"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:ed3e4b4e1e6de75fdc16d3259098de7c6571b1a6cc863b1a49e7d3d53e036070"}, - {file = "Pillow-9.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d5b2f8a31bd43e0f18172d8ac82347c8f37ef3e0b414431157718aa234991b28"}, - {file = "Pillow-9.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:09b89ddc95c248ee788328528e6a2996e09eaccddeeb82a5356e92645733be35"}, - {file = "Pillow-9.4.0-cp310-cp310-win32.whl", hash = "sha256:f09598b416ba39a8f489c124447b007fe865f786a89dbfa48bb5cf395693132a"}, - {file = "Pillow-9.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:f6e78171be3fb7941f9910ea15b4b14ec27725865a73c15277bc39f5ca4f8391"}, - {file = "Pillow-9.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:3fa1284762aacca6dc97474ee9c16f83990b8eeb6697f2ba17140d54b453e133"}, - {file = "Pillow-9.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:eaef5d2de3c7e9b21f1e762f289d17b726c2239a42b11e25446abf82b26ac132"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4dfdae195335abb4e89cc9762b2edc524f3c6e80d647a9a81bf81e17e3fb6f0"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6abfb51a82e919e3933eb137e17c4ae9c0475a25508ea88993bb59faf82f3b35"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:451f10ef963918e65b8869e17d67db5e2f4ab40e716ee6ce7129b0cde2876eab"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:6663977496d616b618b6cfa43ec86e479ee62b942e1da76a2c3daa1c75933ef4"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:60e7da3a3ad1812c128750fc1bc14a7ceeb8d29f77e0a2356a8fb2aa8925287d"}, - {file = "Pillow-9.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:19005a8e58b7c1796bc0167862b1f54a64d3b44ee5d48152b06bb861458bc0f8"}, - {file = "Pillow-9.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f715c32e774a60a337b2bb8ad9839b4abf75b267a0f18806f6f4f5f1688c4b5a"}, - {file = "Pillow-9.4.0-cp311-cp311-win32.whl", hash = "sha256:b222090c455d6d1a64e6b7bb5f4035c4dff479e22455c9eaa1bdd4c75b52c80c"}, - {file = "Pillow-9.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:ba6612b6548220ff5e9df85261bddc811a057b0b465a1226b39bfb8550616aee"}, - {file = "Pillow-9.4.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = 
"sha256:5f532a2ad4d174eb73494e7397988e22bf427f91acc8e6ebf5bb10597b49c493"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dd5a9c3091a0f414a963d427f920368e2b6a4c2f7527fdd82cde8ef0bc7a327"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef21af928e807f10bf4141cad4746eee692a0dd3ff56cfb25fce076ec3cc8abe"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:847b114580c5cc9ebaf216dd8c8dbc6b00a3b7ab0131e173d7120e6deade1f57"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:653d7fb2df65efefbcbf81ef5fe5e5be931f1ee4332c2893ca638c9b11a409c4"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:46f39cab8bbf4a384ba7cb0bc8bae7b7062b6a11cfac1ca4bc144dea90d4a9f5"}, - {file = "Pillow-9.4.0-cp37-cp37m-win32.whl", hash = "sha256:7ac7594397698f77bce84382929747130765f66406dc2cd8b4ab4da68ade4c6e"}, - {file = "Pillow-9.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:46c259e87199041583658457372a183636ae8cd56dbf3f0755e0f376a7f9d0e6"}, - {file = "Pillow-9.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:0e51f608da093e5d9038c592b5b575cadc12fd748af1479b5e858045fff955a9"}, - {file = "Pillow-9.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:765cb54c0b8724a7c12c55146ae4647e0274a839fb6de7bcba841e04298e1011"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:519e14e2c49fcf7616d6d2cfc5c70adae95682ae20f0395e9280db85e8d6c4df"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d197df5489004db87d90b918033edbeee0bd6df3848a204bca3ff0a903bef837"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0845adc64fe9886db00f5ab68c4a8cd933ab749a87747555cec1c95acea64b0b"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:e1339790c083c5a4de48f688b4841f18df839eb3c9584a770cbd818b33e26d5d"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:a96e6e23f2b79433390273eaf8cc94fec9c6370842e577ab10dabdcc7ea0a66b"}, - {file = "Pillow-9.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7cfc287da09f9d2a7ec146ee4d72d6ea1342e770d975e49a8621bf54eaa8f30f"}, - {file = "Pillow-9.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d7081c084ceb58278dd3cf81f836bc818978c0ccc770cbbb202125ddabec6628"}, - {file = "Pillow-9.4.0-cp38-cp38-win32.whl", hash = "sha256:df41112ccce5d47770a0c13651479fbcd8793f34232a2dd9faeccb75eb5d0d0d"}, - {file = "Pillow-9.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:7a21222644ab69ddd9967cfe6f2bb420b460dae4289c9d40ff9a4896e7c35c9a"}, - {file = "Pillow-9.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0f3269304c1a7ce82f1759c12ce731ef9b6e95b6df829dccd9fe42912cc48569"}, - {file = "Pillow-9.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cb362e3b0976dc994857391b776ddaa8c13c28a16f80ac6522c23d5257156bed"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2e0f87144fcbbe54297cae708c5e7f9da21a4646523456b00cc956bd4c65815"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:28676836c7796805914b76b1837a40f76827ee0d5398f72f7dcc634bae7c6264"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0884ba7b515163a1a05440a138adeb722b8a6ae2c2b33aea93ea3118dd3a899e"}, - {file = 
"Pillow-9.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:53dcb50fbdc3fb2c55431a9b30caeb2f7027fcd2aeb501459464f0214200a503"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:e8c5cf126889a4de385c02a2c3d3aba4b00f70234bfddae82a5eaa3ee6d5e3e6"}, - {file = "Pillow-9.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6c6b1389ed66cdd174d040105123a5a1bc91d0aa7059c7261d20e583b6d8cbd2"}, - {file = "Pillow-9.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0dd4c681b82214b36273c18ca7ee87065a50e013112eea7d78c7a1b89a739153"}, - {file = "Pillow-9.4.0-cp39-cp39-win32.whl", hash = "sha256:6d9dfb9959a3b0039ee06c1a1a90dc23bac3b430842dcb97908ddde05870601c"}, - {file = "Pillow-9.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:54614444887e0d3043557d9dbc697dbb16cfb5a35d672b7a0fcc1ed0cf1c600b"}, - {file = "Pillow-9.4.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b9b752ab91e78234941e44abdecc07f1f0d8f51fb62941d32995b8161f68cfe5"}, - {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3b56206244dc8711f7e8b7d6cad4663917cd5b2d950799425076681e8766286"}, - {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aabdab8ec1e7ca7f1434d042bf8b1e92056245fb179790dc97ed040361f16bfd"}, - {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:db74f5562c09953b2c5f8ec4b7dfd3f5421f31811e97d1dbc0a7c93d6e3a24df"}, - {file = "Pillow-9.4.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e9d7747847c53a16a729b6ee5e737cf170f7a16611c143d95aa60a109a59c336"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b52ff4f4e002f828ea6483faf4c4e8deea8d743cf801b74910243c58acc6eda3"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:575d8912dca808edd9acd6f7795199332696d3469665ef26163cd090fa1f8bfa"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3c4ed2ff6760e98d262e0cc9c9a7f7b8a9f61aa4d47c58835cdaf7b0b8811bb"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e621b0246192d3b9cb1dc62c78cfa4c6f6d2ddc0ec207d43c0dedecb914f152a"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:8f127e7b028900421cad64f51f75c051b628db17fb00e099eb148761eed598c9"}, - {file = "Pillow-9.4.0.tar.gz", hash = "sha256:a1c2d7780448eb93fbcc3789bf3916aa5720d942e37945f4056680317f1cd23e"}, + {file = "Pillow-9.5.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:ace6ca218308447b9077c14ea4ef381ba0b67ee78d64046b3f19cf4e1139ad16"}, + {file = "Pillow-9.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d3d403753c9d5adc04d4694d35cf0391f0f3d57c8e0030aac09d7678fa8030aa"}, + {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ba1b81ee69573fe7124881762bb4cd2e4b6ed9dd28c9c60a632902fe8db8b38"}, + {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe7e1c262d3392afcf5071df9afa574544f28eac825284596ac6db56e6d11062"}, + {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f36397bf3f7d7c6a3abdea815ecf6fd14e7fcd4418ab24bae01008d8d8ca15e"}, + {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:252a03f1bdddce077eff2354c3861bf437c892fb1832f75ce813ee94347aa9b5"}, + {file = "Pillow-9.5.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = 
"sha256:85ec677246533e27770b0de5cf0f9d6e4ec0c212a1f89dfc941b64b21226009d"}, + {file = "Pillow-9.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b416f03d37d27290cb93597335a2f85ed446731200705b22bb927405320de903"}, + {file = "Pillow-9.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1781a624c229cb35a2ac31cc4a77e28cafc8900733a864870c49bfeedacd106a"}, + {file = "Pillow-9.5.0-cp310-cp310-win32.whl", hash = "sha256:8507eda3cd0608a1f94f58c64817e83ec12fa93a9436938b191b80d9e4c0fc44"}, + {file = "Pillow-9.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:d3c6b54e304c60c4181da1c9dadf83e4a54fd266a99c70ba646a9baa626819eb"}, + {file = "Pillow-9.5.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:7ec6f6ce99dab90b52da21cf0dc519e21095e332ff3b399a357c187b1a5eee32"}, + {file = "Pillow-9.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:560737e70cb9c6255d6dcba3de6578a9e2ec4b573659943a5e7e4af13f298f5c"}, + {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:96e88745a55b88a7c64fa49bceff363a1a27d9a64e04019c2281049444a571e3"}, + {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d9c206c29b46cfd343ea7cdfe1232443072bbb270d6a46f59c259460db76779a"}, + {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cfcc2c53c06f2ccb8976fb5c71d448bdd0a07d26d8e07e321c103416444c7ad1"}, + {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:a0f9bb6c80e6efcde93ffc51256d5cfb2155ff8f78292f074f60f9e70b942d99"}, + {file = "Pillow-9.5.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:8d935f924bbab8f0a9a28404422da8af4904e36d5c33fc6f677e4c4485515625"}, + {file = "Pillow-9.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fed1e1cf6a42577953abbe8e6cf2fe2f566daebde7c34724ec8803c4c0cda579"}, + {file = "Pillow-9.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c1170d6b195555644f0616fd6ed929dfcf6333b8675fcca044ae5ab110ded296"}, + {file = "Pillow-9.5.0-cp311-cp311-win32.whl", hash = "sha256:54f7102ad31a3de5666827526e248c3530b3a33539dbda27c6843d19d72644ec"}, + {file = "Pillow-9.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:cfa4561277f677ecf651e2b22dc43e8f5368b74a25a8f7d1d4a3a243e573f2d4"}, + {file = "Pillow-9.5.0-cp311-cp311-win_arm64.whl", hash = "sha256:965e4a05ef364e7b973dd17fc765f42233415974d773e82144c9bbaaaea5d089"}, + {file = "Pillow-9.5.0-cp312-cp312-win32.whl", hash = "sha256:22baf0c3cf0c7f26e82d6e1adf118027afb325e703922c8dfc1d5d0156bb2eeb"}, + {file = "Pillow-9.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:432b975c009cf649420615388561c0ce7cc31ce9b2e374db659ee4f7d57a1f8b"}, + {file = "Pillow-9.5.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:5d4ebf8e1db4441a55c509c4baa7a0587a0210f7cd25fcfe74dbbce7a4bd1906"}, + {file = "Pillow-9.5.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:375f6e5ee9620a271acb6820b3d1e94ffa8e741c0601db4c0c4d3cb0a9c224bf"}, + {file = "Pillow-9.5.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99eb6cafb6ba90e436684e08dad8be1637efb71c4f2180ee6b8f940739406e78"}, + {file = "Pillow-9.5.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dfaaf10b6172697b9bceb9a3bd7b951819d1ca339a5ef294d1f1ac6d7f63270"}, + {file = "Pillow-9.5.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:763782b2e03e45e2c77d7779875f4432e25121ef002a41829d8868700d119392"}, + {file = "Pillow-9.5.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = 
"sha256:35f6e77122a0c0762268216315bf239cf52b88865bba522999dc38f1c52b9b47"}, + {file = "Pillow-9.5.0-cp37-cp37m-win32.whl", hash = "sha256:aca1c196f407ec7cf04dcbb15d19a43c507a81f7ffc45b690899d6a76ac9fda7"}, + {file = "Pillow-9.5.0-cp37-cp37m-win_amd64.whl", hash = "sha256:322724c0032af6692456cd6ed554bb85f8149214d97398bb80613b04e33769f6"}, + {file = "Pillow-9.5.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:a0aa9417994d91301056f3d0038af1199eb7adc86e646a36b9e050b06f526597"}, + {file = "Pillow-9.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f8286396b351785801a976b1e85ea88e937712ee2c3ac653710a4a57a8da5d9c"}, + {file = "Pillow-9.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c830a02caeb789633863b466b9de10c015bded434deb3ec87c768e53752ad22a"}, + {file = "Pillow-9.5.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fbd359831c1657d69bb81f0db962905ee05e5e9451913b18b831febfe0519082"}, + {file = "Pillow-9.5.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8fc330c3370a81bbf3f88557097d1ea26cd8b019d6433aa59f71195f5ddebbf"}, + {file = "Pillow-9.5.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:7002d0797a3e4193c7cdee3198d7c14f92c0836d6b4a3f3046a64bd1ce8df2bf"}, + {file = "Pillow-9.5.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:229e2c79c00e85989a34b5981a2b67aa079fd08c903f0aaead522a1d68d79e51"}, + {file = "Pillow-9.5.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9adf58f5d64e474bed00d69bcd86ec4bcaa4123bfa70a65ce72e424bfb88ed96"}, + {file = "Pillow-9.5.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:662da1f3f89a302cc22faa9f14a262c2e3951f9dbc9617609a47521c69dd9f8f"}, + {file = "Pillow-9.5.0-cp38-cp38-win32.whl", hash = "sha256:6608ff3bf781eee0cd14d0901a2b9cc3d3834516532e3bd673a0a204dc8615fc"}, + {file = "Pillow-9.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:e49eb4e95ff6fd7c0c402508894b1ef0e01b99a44320ba7d8ecbabefddcc5569"}, + {file = "Pillow-9.5.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:482877592e927fd263028c105b36272398e3e1be3269efda09f6ba21fd83ec66"}, + {file = "Pillow-9.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3ded42b9ad70e5f1754fb7c2e2d6465a9c842e41d178f262e08b8c85ed8a1d8e"}, + {file = "Pillow-9.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c446d2245ba29820d405315083d55299a796695d747efceb5717a8b450324115"}, + {file = "Pillow-9.5.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8aca1152d93dcc27dc55395604dcfc55bed5f25ef4c98716a928bacba90d33a3"}, + {file = "Pillow-9.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:608488bdcbdb4ba7837461442b90ea6f3079397ddc968c31265c1e056964f1ef"}, + {file = "Pillow-9.5.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:60037a8db8750e474af7ffc9faa9b5859e6c6d0a50e55c45576bf28be7419705"}, + {file = "Pillow-9.5.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:07999f5834bdc404c442146942a2ecadd1cb6292f5229f4ed3b31e0a108746b1"}, + {file = "Pillow-9.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a127ae76092974abfbfa38ca2d12cbeddcdeac0fb71f9627cc1135bedaf9d51a"}, + {file = "Pillow-9.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:489f8389261e5ed43ac8ff7b453162af39c3e8abd730af8363587ba64bb2e865"}, + {file = "Pillow-9.5.0-cp39-cp39-win32.whl", hash = "sha256:9b1af95c3a967bf1da94f253e56b6286b50af23392a886720f563c547e48e964"}, + {file = "Pillow-9.5.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:77165c4a5e7d5a284f10a6efaa39a0ae8ba839da344f20b111d62cc932fa4e5d"}, + {file = "Pillow-9.5.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:833b86a98e0ede388fa29363159c9b1a294b0905b5128baf01db683672f230f5"}, + {file = "Pillow-9.5.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aaf305d6d40bd9632198c766fb64f0c1a83ca5b667f16c1e79e1661ab5060140"}, + {file = "Pillow-9.5.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0852ddb76d85f127c135b6dd1f0bb88dbb9ee990d2cd9aa9e28526c93e794fba"}, + {file = "Pillow-9.5.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:91ec6fe47b5eb5a9968c79ad9ed78c342b1f97a091677ba0e012701add857829"}, + {file = "Pillow-9.5.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:cb841572862f629b99725ebaec3287fc6d275be9b14443ea746c1dd325053cbd"}, + {file = "Pillow-9.5.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:c380b27d041209b849ed246b111b7c166ba36d7933ec6e41175fd15ab9eb1572"}, + {file = "Pillow-9.5.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c9af5a3b406a50e313467e3565fc99929717f780164fe6fbb7704edba0cebbe"}, + {file = "Pillow-9.5.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5671583eab84af046a397d6d0ba25343c00cd50bce03787948e0fff01d4fd9b1"}, + {file = "Pillow-9.5.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:84a6f19ce086c1bf894644b43cd129702f781ba5751ca8572f08aa40ef0ab7b7"}, + {file = "Pillow-9.5.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1e7723bd90ef94eda669a3c2c19d549874dd5badaeefabefd26053304abe5799"}, + {file = "Pillow-9.5.0.tar.gz", hash = "sha256:bf548479d336726d7a0eceb6e767e179fbde37833ae42794602631a070d630f1"}, ] [package.extras] -docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-issues (>=3.0.1)", "sphinx-removed-in", "sphinxext-opengraph"] +docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] [[package]] name = "pkginfo" -version = "1.8.2" -description = "Query metadatdata from sdists / bdists / installed packages." +version = "1.9.6" +description = "Query metadata from sdists / bdists / installed packages." category = "dev" optional = false -python-versions = "*" +python-versions = ">=3.6" files = [ - {file = "pkginfo-1.8.2-py2.py3-none-any.whl", hash = "sha256:c24c487c6a7f72c66e816ab1796b96ac6c3d14d49338293d2141664330b55ffc"}, - {file = "pkginfo-1.8.2.tar.gz", hash = "sha256:542e0d0b6750e2e21c20179803e40ab50598d8066d51097a0e382cba9eb02bff"}, + {file = "pkginfo-1.9.6-py3-none-any.whl", hash = "sha256:4b7a555a6d5a22169fcc9cf7bfd78d296b0361adad412a346c1226849af5e546"}, + {file = "pkginfo-1.9.6.tar.gz", hash = "sha256:8fd5896e8718a4372f0ea9cc9d96f6417c9b986e23a4d116dda26b62cc29d046"}, ] [package.extras] -testing = ["coverage", "nose"] +testing = ["pytest", "pytest-cov"] [[package]] -name = "pkgutil_resolve_name" +name = "pkgutil-resolve-name" version = "1.3.10" description = "Resolve a name to an object." category = "main" @@ -1426,30 +1651,33 @@ files = [ [[package]] name = "platformdirs" -version = "2.5.1" -description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
+version = "3.2.0" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "platformdirs-2.5.1-py3-none-any.whl", hash = "sha256:bcae7cab893c2d310a711b70b24efb93334febe65f8de776ee320b517471e227"}, - {file = "platformdirs-2.5.1.tar.gz", hash = "sha256:7535e70dfa32e84d4b34996ea99c5e432fa29a708d0f4e394bbcb2a8faa4f16d"}, + {file = "platformdirs-3.2.0-py3-none-any.whl", hash = "sha256:ebe11c0d7a805086e99506aa331612429a72ca7cd52a1f0d277dc4adc20cb10e"}, + {file = "platformdirs-3.2.0.tar.gz", hash = "sha256:d5b638ca397f25f979350ff789db335903d7ea010ab28903f57b27e1b16c2b08"}, ] +[package.dependencies] +typing-extensions = {version = ">=4.5", markers = "python_version < \"3.8\""} + [package.extras] -docs = ["Sphinx (>=4)", "furo (>=2021.7.5b38)", "proselint (>=0.10.2)", "sphinx-autodoc-typehints (>=1.12)"] -test = ["appdirs (==1.4.4)", "pytest (>=6)", "pytest-cov (>=2.7)", "pytest-mock (>=3.6)"] +docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.2.2)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"] [[package]] name = "prometheus-client" -version = "0.15.0" +version = "0.16.0" description = "Python client for the Prometheus monitoring system." category = "main" optional = false python-versions = ">=3.6" files = [ - {file = "prometheus_client-0.15.0-py3-none-any.whl", hash = "sha256:db7c05cbd13a0f79975592d112320f2605a325969b270a94b71dcabc47b931d2"}, - {file = "prometheus_client-0.15.0.tar.gz", hash = "sha256:be26aa452490cfcf6da953f9436e95a9f2b4d578ca80094b4458930e5f584ab1"}, + {file = "prometheus_client-0.16.0-py3-none-any.whl", hash = "sha256:0836af6eb2c8f4fed712b2f279f6c0a8bbab29f9f4aa15276b91c7cb0d1616ab"}, + {file = "prometheus_client-0.16.0.tar.gz", hash = "sha256:a03e35b359f14dd1630898543e2120addfdeacd1a6069c1367ae90fd93ad3f48"}, ] [package.extras] @@ -1457,25 +1685,25 @@ twisted = ["twisted"] [[package]] name = "psycopg2" -version = "2.9.5" +version = "2.9.6" description = "psycopg2 - Python-PostgreSQL Database Adapter" category = "main" optional = true python-versions = ">=3.6" files = [ - {file = "psycopg2-2.9.5-cp310-cp310-win32.whl", hash = "sha256:d3ef67e630b0de0779c42912fe2cbae3805ebaba30cda27fea2a3de650a9414f"}, - {file = "psycopg2-2.9.5-cp310-cp310-win_amd64.whl", hash = "sha256:4cb9936316d88bfab614666eb9e32995e794ed0f8f6b3b718666c22819c1d7ee"}, - {file = "psycopg2-2.9.5-cp311-cp311-win32.whl", hash = "sha256:093e3894d2d3c592ab0945d9eba9d139c139664dcf83a1c440b8a7aa9bb21955"}, - {file = "psycopg2-2.9.5-cp311-cp311-win_amd64.whl", hash = "sha256:920bf418000dd17669d2904472efeab2b20546efd0548139618f8fa305d1d7ad"}, - {file = "psycopg2-2.9.5-cp36-cp36m-win32.whl", hash = "sha256:b9ac1b0d8ecc49e05e4e182694f418d27f3aedcfca854ebd6c05bb1cffa10d6d"}, - {file = "psycopg2-2.9.5-cp36-cp36m-win_amd64.whl", hash = "sha256:fc04dd5189b90d825509caa510f20d1d504761e78b8dfb95a0ede180f71d50e5"}, - {file = "psycopg2-2.9.5-cp37-cp37m-win32.whl", hash = "sha256:922cc5f0b98a5f2b1ff481f5551b95cd04580fd6f0c72d9b22e6c0145a4840e0"}, - {file = "psycopg2-2.9.5-cp37-cp37m-win_amd64.whl", hash = "sha256:1e5a38aa85bd660c53947bd28aeaafb6a97d70423606f1ccb044a03a1203fe4a"}, - {file = "psycopg2-2.9.5-cp38-cp38-win32.whl", hash = "sha256:f5b6320dbc3cf6cfb9f25308286f9f7ab464e65cfb105b64cc9c52831748ced2"}, - {file = "psycopg2-2.9.5-cp38-cp38-win_amd64.whl", hash 
= "sha256:1a5c7d7d577e0eabfcf15eb87d1e19314c8c4f0e722a301f98e0e3a65e238b4e"}, - {file = "psycopg2-2.9.5-cp39-cp39-win32.whl", hash = "sha256:322fd5fca0b1113677089d4ebd5222c964b1760e361f151cbb2706c4912112c5"}, - {file = "psycopg2-2.9.5-cp39-cp39-win_amd64.whl", hash = "sha256:190d51e8c1b25a47484e52a79638a8182451d6f6dff99f26ad9bd81e5359a0fa"}, - {file = "psycopg2-2.9.5.tar.gz", hash = "sha256:a5246d2e683a972e2187a8714b5c2cf8156c064629f9a9b1a873c1730d9e245a"}, + {file = "psycopg2-2.9.6-cp310-cp310-win32.whl", hash = "sha256:f7a7a5ee78ba7dc74265ba69e010ae89dae635eea0e97b055fb641a01a31d2b1"}, + {file = "psycopg2-2.9.6-cp310-cp310-win_amd64.whl", hash = "sha256:f75001a1cbbe523e00b0ef896a5a1ada2da93ccd752b7636db5a99bc57c44494"}, + {file = "psycopg2-2.9.6-cp311-cp311-win32.whl", hash = "sha256:53f4ad0a3988f983e9b49a5d9765d663bbe84f508ed655affdb810af9d0972ad"}, + {file = "psycopg2-2.9.6-cp311-cp311-win_amd64.whl", hash = "sha256:b81fcb9ecfc584f661b71c889edeae70bae30d3ef74fa0ca388ecda50b1222b7"}, + {file = "psycopg2-2.9.6-cp36-cp36m-win32.whl", hash = "sha256:11aca705ec888e4f4cea97289a0bf0f22a067a32614f6ef64fcf7b8bfbc53744"}, + {file = "psycopg2-2.9.6-cp36-cp36m-win_amd64.whl", hash = "sha256:36c941a767341d11549c0fbdbb2bf5be2eda4caf87f65dfcd7d146828bd27f39"}, + {file = "psycopg2-2.9.6-cp37-cp37m-win32.whl", hash = "sha256:869776630c04f335d4124f120b7fb377fe44b0a7645ab3c34b4ba42516951889"}, + {file = "psycopg2-2.9.6-cp37-cp37m-win_amd64.whl", hash = "sha256:a8ad4a47f42aa6aec8d061fdae21eaed8d864d4bb0f0cade5ad32ca16fcd6258"}, + {file = "psycopg2-2.9.6-cp38-cp38-win32.whl", hash = "sha256:2362ee4d07ac85ff0ad93e22c693d0f37ff63e28f0615a16b6635a645f4b9214"}, + {file = "psycopg2-2.9.6-cp38-cp38-win_amd64.whl", hash = "sha256:d24ead3716a7d093b90b27b3d73459fe8cd90fd7065cf43b3c40966221d8c394"}, + {file = "psycopg2-2.9.6-cp39-cp39-win32.whl", hash = "sha256:1861a53a6a0fd248e42ea37c957d36950da00266378746588eab4f4b5649e95f"}, + {file = "psycopg2-2.9.6-cp39-cp39-win_amd64.whl", hash = "sha256:ded2faa2e6dfb430af7713d87ab4abbfc764d8d7fb73eafe96a24155f906ebf5"}, + {file = "psycopg2-2.9.6.tar.gz", hash = "sha256:f15158418fd826831b28585e2ab48ed8df2d0d98f502a2b4fe619e7d5ca29011"}, ] [[package]] @@ -1515,18 +1743,7 @@ category = "main" optional = false python-versions = "*" files = [ - {file = "pyasn1-0.4.8-py2.4.egg", hash = "sha256:fec3e9d8e36808a28efb59b489e4528c10ad0f480e57dcc32b4de5c9d8c9fdf3"}, - {file = "pyasn1-0.4.8-py2.5.egg", hash = "sha256:0458773cfe65b153891ac249bcf1b5f8f320b7c2ce462151f8fa74de8934becf"}, - {file = "pyasn1-0.4.8-py2.6.egg", hash = "sha256:5c9414dcfede6e441f7e8f81b43b34e834731003427e5b09e4e00e3172a10f00"}, - {file = "pyasn1-0.4.8-py2.7.egg", hash = "sha256:6e7545f1a61025a4e58bb336952c5061697da694db1cae97b116e9c46abcf7c8"}, {file = "pyasn1-0.4.8-py2.py3-none-any.whl", hash = "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d"}, - {file = "pyasn1-0.4.8-py3.1.egg", hash = "sha256:78fa6da68ed2727915c4767bb386ab32cdba863caa7dbe473eaae45f9959da86"}, - {file = "pyasn1-0.4.8-py3.2.egg", hash = "sha256:08c3c53b75eaa48d71cf8c710312316392ed40899cb34710d092e96745a358b7"}, - {file = "pyasn1-0.4.8-py3.3.egg", hash = "sha256:03840c999ba71680a131cfaee6fab142e1ed9bbd9c693e285cc6aca0d555e576"}, - {file = "pyasn1-0.4.8-py3.4.egg", hash = "sha256:7ab8a544af125fb704feadb008c99a88805126fb525280b2270bb25cc1d78a12"}, - {file = "pyasn1-0.4.8-py3.5.egg", hash = "sha256:e89bf84b5437b532b0803ba5c9a5e054d21fec423a89952a74f87fa2c9b7bce2"}, - {file = "pyasn1-0.4.8-py3.6.egg", hash = 
"sha256:014c0e9976956a08139dc0712ae195324a75e142284d5f87f1a87ee1b068a359"}, - {file = "pyasn1-0.4.8-py3.7.egg", hash = "sha256:99fcc3c8d804d1bc6d9a099921e39d827026409a58f2a720dcdb89374ea0c776"}, {file = "pyasn1-0.4.8.tar.gz", hash = "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba"}, ] @@ -1539,18 +1756,7 @@ optional = false python-versions = "*" files = [ {file = "pyasn1-modules-0.2.8.tar.gz", hash = "sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e"}, - {file = "pyasn1_modules-0.2.8-py2.4.egg", hash = "sha256:0fe1b68d1e486a1ed5473f1302bd991c1611d319bba158e98b106ff86e1d7199"}, - {file = "pyasn1_modules-0.2.8-py2.5.egg", hash = "sha256:fe0644d9ab041506b62782e92b06b8c68cca799e1a9636ec398675459e031405"}, - {file = "pyasn1_modules-0.2.8-py2.6.egg", hash = "sha256:a99324196732f53093a84c4369c996713eb8c89d360a496b599fb1a9c47fc3eb"}, - {file = "pyasn1_modules-0.2.8-py2.7.egg", hash = "sha256:0845a5582f6a02bb3e1bde9ecfc4bfcae6ec3210dd270522fee602365430c3f8"}, {file = "pyasn1_modules-0.2.8-py2.py3-none-any.whl", hash = "sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74"}, - {file = "pyasn1_modules-0.2.8-py3.1.egg", hash = "sha256:f39edd8c4ecaa4556e989147ebf219227e2cd2e8a43c7e7fcb1f1c18c5fd6a3d"}, - {file = "pyasn1_modules-0.2.8-py3.2.egg", hash = "sha256:b80486a6c77252ea3a3e9b1e360bc9cf28eaac41263d173c032581ad2f20fe45"}, - {file = "pyasn1_modules-0.2.8-py3.3.egg", hash = "sha256:65cebbaffc913f4fe9e4808735c95ea22d7a7775646ab690518c056784bc21b4"}, - {file = "pyasn1_modules-0.2.8-py3.4.egg", hash = "sha256:15b7c67fabc7fc240d87fb9aabf999cf82311a6d6fb2c70d00d3d0604878c811"}, - {file = "pyasn1_modules-0.2.8-py3.5.egg", hash = "sha256:426edb7a5e8879f1ec54a1864f16b882c2837bfd06eee62f2c982315ee2473ed"}, - {file = "pyasn1_modules-0.2.8-py3.6.egg", hash = "sha256:cbac4bc38d117f2a49aeedec4407d23e8866ea4ac27ff2cf7fb3e5b570df19e0"}, - {file = "pyasn1_modules-0.2.8-py3.7.egg", hash = "sha256:c29a5e5cc7a3f05926aff34e097e84f8589cd790ce0ed41b67aed6857b26aafd"}, ] [package.dependencies] @@ -1570,48 +1776,48 @@ files = [ [[package]] name = "pydantic" -version = "1.10.4" +version = "1.10.7" description = "Data validation and settings management using python type hints" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic-1.10.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5635de53e6686fe7a44b5cf25fcc419a0d5e5c1a1efe73d49d48fe7586db854"}, - {file = "pydantic-1.10.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6dc1cc241440ed7ca9ab59d9929075445da6b7c94ced281b3dd4cfe6c8cff817"}, - {file = "pydantic-1.10.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51bdeb10d2db0f288e71d49c9cefa609bca271720ecd0c58009bd7504a0c464c"}, - {file = "pydantic-1.10.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78cec42b95dbb500a1f7120bdf95c401f6abb616bbe8785ef09887306792e66e"}, - {file = "pydantic-1.10.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8775d4ef5e7299a2f4699501077a0defdaac5b6c4321173bcb0f3c496fbadf85"}, - {file = "pydantic-1.10.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:572066051eeac73d23f95ba9a71349c42a3e05999d0ee1572b7860235b850cc6"}, - {file = "pydantic-1.10.4-cp310-cp310-win_amd64.whl", hash = "sha256:7feb6a2d401f4d6863050f58325b8d99c1e56f4512d98b11ac64ad1751dc647d"}, - {file = "pydantic-1.10.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:39f4a73e5342b25c2959529f07f026ef58147249f9b7431e1ba8414a36761f53"}, - {file = "pydantic-1.10.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:983e720704431a6573d626b00662eb78a07148c9115129f9b4351091ec95ecc3"}, - {file = "pydantic-1.10.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75d52162fe6b2b55964fbb0af2ee58e99791a3138588c482572bb6087953113a"}, - {file = "pydantic-1.10.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fdf8d759ef326962b4678d89e275ffc55b7ce59d917d9f72233762061fd04a2d"}, - {file = "pydantic-1.10.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:05a81b006be15655b2a1bae5faa4280cf7c81d0e09fcb49b342ebf826abe5a72"}, - {file = "pydantic-1.10.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d88c4c0e5c5dfd05092a4b271282ef0588e5f4aaf345778056fc5259ba098857"}, - {file = "pydantic-1.10.4-cp311-cp311-win_amd64.whl", hash = "sha256:6a05a9db1ef5be0fe63e988f9617ca2551013f55000289c671f71ec16f4985e3"}, - {file = "pydantic-1.10.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:887ca463c3bc47103c123bc06919c86720e80e1214aab79e9b779cda0ff92a00"}, - {file = "pydantic-1.10.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdf88ab63c3ee282c76d652fc86518aacb737ff35796023fae56a65ced1a5978"}, - {file = "pydantic-1.10.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a48f1953c4a1d9bd0b5167ac50da9a79f6072c63c4cef4cf2a3736994903583e"}, - {file = "pydantic-1.10.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:a9f2de23bec87ff306aef658384b02aa7c32389766af3c5dee9ce33e80222dfa"}, - {file = "pydantic-1.10.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:cd8702c5142afda03dc2b1ee6bc358b62b3735b2cce53fc77b31ca9f728e4bc8"}, - {file = "pydantic-1.10.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6e7124d6855b2780611d9f5e1e145e86667eaa3bd9459192c8dc1a097f5e9903"}, - {file = "pydantic-1.10.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b53e1d41e97063d51a02821b80538053ee4608b9a181c1005441f1673c55423"}, - {file = "pydantic-1.10.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:55b1625899acd33229c4352ce0ae54038529b412bd51c4915349b49ca575258f"}, - {file = "pydantic-1.10.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:301d626a59edbe5dfb48fcae245896379a450d04baeed50ef40d8199f2733b06"}, - {file = "pydantic-1.10.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6f9d649892a6f54a39ed56b8dfd5e08b5f3be5f893da430bed76975f3735d15"}, - {file = "pydantic-1.10.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d7b5a3821225f5c43496c324b0d6875fde910a1c2933d726a743ce328fbb2a8c"}, - {file = "pydantic-1.10.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f2f7eb6273dd12472d7f218e1fef6f7c7c2f00ac2e1ecde4db8824c457300416"}, - {file = "pydantic-1.10.4-cp38-cp38-win_amd64.whl", hash = "sha256:4b05697738e7d2040696b0a66d9f0a10bec0efa1883ca75ee9e55baf511909d6"}, - {file = "pydantic-1.10.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a9a6747cac06c2beb466064dda999a13176b23535e4c496c9d48e6406f92d42d"}, - {file = "pydantic-1.10.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:eb992a1ef739cc7b543576337bebfc62c0e6567434e522e97291b251a41dad7f"}, - {file = "pydantic-1.10.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:990406d226dea0e8f25f643b370224771878142155b879784ce89f633541a024"}, - {file = 
"pydantic-1.10.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e82a6d37a95e0b1b42b82ab340ada3963aea1317fd7f888bb6b9dfbf4fff57c"}, - {file = "pydantic-1.10.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9193d4f4ee8feca58bc56c8306bcb820f5c7905fd919e0750acdeeeef0615b28"}, - {file = "pydantic-1.10.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2b3ce5f16deb45c472dde1a0ee05619298c864a20cded09c4edd820e1454129f"}, - {file = "pydantic-1.10.4-cp39-cp39-win_amd64.whl", hash = "sha256:9cbdc268a62d9a98c56e2452d6c41c0263d64a2009aac69246486f01b4f594c4"}, - {file = "pydantic-1.10.4-py3-none-any.whl", hash = "sha256:4948f264678c703f3877d1c8877c4e3b2e12e549c57795107f08cf70c6ec7774"}, - {file = "pydantic-1.10.4.tar.gz", hash = "sha256:b9a3859f24eb4e097502a3be1fb4b2abb79b6103dd9e2e0edb70613a4459a648"}, + {file = "pydantic-1.10.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e79e999e539872e903767c417c897e729e015872040e56b96e67968c3b918b2d"}, + {file = "pydantic-1.10.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:01aea3a42c13f2602b7ecbbea484a98169fb568ebd9e247593ea05f01b884b2e"}, + {file = "pydantic-1.10.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:516f1ed9bc2406a0467dd777afc636c7091d71f214d5e413d64fef45174cfc7a"}, + {file = "pydantic-1.10.7-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae150a63564929c675d7f2303008d88426a0add46efd76c3fc797cd71cb1b46f"}, + {file = "pydantic-1.10.7-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ecbbc51391248116c0a055899e6c3e7ffbb11fb5e2a4cd6f2d0b93272118a209"}, + {file = "pydantic-1.10.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f4a2b50e2b03d5776e7f21af73e2070e1b5c0d0df255a827e7c632962f8315af"}, + {file = "pydantic-1.10.7-cp310-cp310-win_amd64.whl", hash = "sha256:a7cd2251439988b413cb0a985c4ed82b6c6aac382dbaff53ae03c4b23a70e80a"}, + {file = "pydantic-1.10.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:68792151e174a4aa9e9fc1b4e653e65a354a2fa0fed169f7b3d09902ad2cb6f1"}, + {file = "pydantic-1.10.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe2507b8ef209da71b6fb5f4e597b50c5a34b78d7e857c4f8f3115effaef5fe"}, + {file = "pydantic-1.10.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10a86d8c8db68086f1e30a530f7d5f83eb0685e632e411dbbcf2d5c0150e8dcd"}, + {file = "pydantic-1.10.7-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d75ae19d2a3dbb146b6f324031c24f8a3f52ff5d6a9f22f0683694b3afcb16fb"}, + {file = "pydantic-1.10.7-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:464855a7ff7f2cc2cf537ecc421291b9132aa9c79aef44e917ad711b4a93163b"}, + {file = "pydantic-1.10.7-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:193924c563fae6ddcb71d3f06fa153866423ac1b793a47936656e806b64e24ca"}, + {file = "pydantic-1.10.7-cp311-cp311-win_amd64.whl", hash = "sha256:b4a849d10f211389502059c33332e91327bc154acc1845f375a99eca3afa802d"}, + {file = "pydantic-1.10.7-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cc1dde4e50a5fc1336ee0581c1612215bc64ed6d28d2c7c6f25d2fe3e7c3e918"}, + {file = "pydantic-1.10.7-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0cfe895a504c060e5d36b287ee696e2fdad02d89e0d895f83037245218a87fe"}, + {file = "pydantic-1.10.7-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:670bb4683ad1e48b0ecb06f0cfe2178dcf74ff27921cdf1606e527d2617a81ee"}, + {file = "pydantic-1.10.7-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:950ce33857841f9a337ce07ddf46bc84e1c4946d2a3bba18f8280297157a3fd1"}, + {file = "pydantic-1.10.7-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:c15582f9055fbc1bfe50266a19771bbbef33dd28c45e78afbe1996fd70966c2a"}, + {file = "pydantic-1.10.7-cp37-cp37m-win_amd64.whl", hash = "sha256:82dffb306dd20bd5268fd6379bc4bfe75242a9c2b79fec58e1041fbbdb1f7914"}, + {file = "pydantic-1.10.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8c7f51861d73e8b9ddcb9916ae7ac39fb52761d9ea0df41128e81e2ba42886cd"}, + {file = "pydantic-1.10.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6434b49c0b03a51021ade5c4daa7d70c98f7a79e95b551201fff682fc1661245"}, + {file = "pydantic-1.10.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64d34ab766fa056df49013bb6e79921a0265204c071984e75a09cbceacbbdd5d"}, + {file = "pydantic-1.10.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:701daea9ffe9d26f97b52f1d157e0d4121644f0fcf80b443248434958fd03dc3"}, + {file = "pydantic-1.10.7-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:cf135c46099ff3f919d2150a948ce94b9ce545598ef2c6c7bf55dca98a304b52"}, + {file = "pydantic-1.10.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b0f85904f73161817b80781cc150f8b906d521fa11e3cdabae19a581c3606209"}, + {file = "pydantic-1.10.7-cp38-cp38-win_amd64.whl", hash = "sha256:9f6f0fd68d73257ad6685419478c5aece46432f4bdd8d32c7345f1986496171e"}, + {file = "pydantic-1.10.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c230c0d8a322276d6e7b88c3f7ce885f9ed16e0910354510e0bae84d54991143"}, + {file = "pydantic-1.10.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:976cae77ba6a49d80f461fd8bba183ff7ba79f44aa5cfa82f1346b5626542f8e"}, + {file = "pydantic-1.10.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d45fc99d64af9aaf7e308054a0067fdcd87ffe974f2442312372dfa66e1001d"}, + {file = "pydantic-1.10.7-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d2a5ebb48958754d386195fe9e9c5106f11275867051bf017a8059410e9abf1f"}, + {file = "pydantic-1.10.7-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:abfb7d4a7cd5cc4e1d1887c43503a7c5dd608eadf8bc615413fc498d3e4645cd"}, + {file = "pydantic-1.10.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:80b1fab4deb08a8292d15e43a6edccdffa5377a36a4597bb545b93e79c5ff0a5"}, + {file = "pydantic-1.10.7-cp39-cp39-win_amd64.whl", hash = "sha256:d71e69699498b020ea198468e2480a2f1e7433e32a3a99760058c6520e2bea7e"}, + {file = "pydantic-1.10.7-py3-none-any.whl", hash = "sha256:0cd181f1d0b1d00e2b705f1bf1ac7799a2d938cce3376b8007df62b29be3c2c6"}, + {file = "pydantic-1.10.7.tar.gz", hash = "sha256:cfc83c0678b6ba51b0532bea66860617c4cd4251ecf76e9846fa5a9f3454e97e"}, ] [package.dependencies] @@ -1623,37 +1829,37 @@ email = ["email-validator (>=1.0.3)"] [[package]] name = "pygithub" -version = "1.57" +version = "1.58.1" description = "Use the full Github API v3" category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "PyGithub-1.57-py3-none-any.whl", hash = "sha256:5822febeac2391f1306c55a99af2bc8f86c8bf82ded000030cd02c18f31b731f"}, - {file = "PyGithub-1.57.tar.gz", hash = "sha256:c273f252b278fb81f1769505cc6921bdb6791e1cebd6ac850cc97dad13c31ff3"}, + {file = "PyGithub-1.58.1-py3-none-any.whl", hash = 
"sha256:4e7fe9c3ec30d5fde5b4fbb97f18821c9dbf372bf6df337fe66f6689a65e0a83"}, + {file = "PyGithub-1.58.1.tar.gz", hash = "sha256:7d528b4ad92bc13122129fafd444ce3d04c47d2d801f6446b6e6ee2d410235b3"}, ] [package.dependencies] deprecated = "*" -pyjwt = ">=2.4.0" +pyjwt = {version = ">=2.4.0", extras = ["crypto"]} pynacl = ">=1.4.0" requests = ">=2.14.0" -[package.extras] -integrations = ["cryptography"] - [[package]] name = "pygments" -version = "2.11.2" +version = "2.15.0" description = "Pygments is a syntax highlighting package written in Python." category = "dev" optional = false -python-versions = ">=3.5" +python-versions = ">=3.7" files = [ - {file = "Pygments-2.11.2-py3-none-any.whl", hash = "sha256:44238f1b60a76d78fc8ca0528ee429702aae011c265fe6a8dd8b63049ae41c65"}, - {file = "Pygments-2.11.2.tar.gz", hash = "sha256:4e426f72023d88d03b2fa258de560726ce890ff3b630f88c21cbb8b2503b8c6a"}, + {file = "Pygments-2.15.0-py3-none-any.whl", hash = "sha256:77a3299119af881904cd5ecd1ac6a66214b6e9bed1f2db16993b54adede64094"}, + {file = "Pygments-2.15.0.tar.gz", hash = "sha256:f7e36cffc4c517fbc252861b9a6e4644ca0e5abadf9a113c72d1358ad09b9500"}, ] +[package.extras] +plugins = ["importlib-metadata"] + [[package]] name = "pyicu" version = "2.10.2" @@ -1667,20 +1873,23 @@ files = [ [[package]] name = "pyjwt" -version = "2.4.0" +version = "2.6.0" description = "JSON Web Token implementation in Python" category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "PyJWT-2.4.0-py3-none-any.whl", hash = "sha256:72d1d253f32dbd4f5c88eaf1fdc62f3a19f676ccbadb9dbc5d07e951b2b26daf"}, - {file = "PyJWT-2.4.0.tar.gz", hash = "sha256:d42908208c699b3b973cbeb01a969ba6a96c821eefb1c5bfe4c390c01d67abba"}, + {file = "PyJWT-2.6.0-py3-none-any.whl", hash = "sha256:d83c3d892a77bbb74d3e1a2cfa90afaadb60945205d1095d9221f04466f64c14"}, + {file = "PyJWT-2.6.0.tar.gz", hash = "sha256:69285c7e31fc44f68a1feb309e948e0df53259d579295e6cfe2b1792329f05fd"}, ] +[package.dependencies] +cryptography = {version = ">=3.4.0", optional = true, markers = "extra == \"crypto\""} + [package.extras] -crypto = ["cryptography (>=3.3.1)"] -dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.3.1)", "mypy", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx", "sphinx-rtd-theme", "zope.interface"] -docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"] +crypto = ["cryptography (>=3.4.0)"] +dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] +docs = ["sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] [[package]] @@ -1735,23 +1944,23 @@ files = [ cffi = ">=1.4.1" [package.extras] -docs = ["sphinx (>=1.6.5)", "sphinx_rtd_theme"] +docs = ["sphinx (>=1.6.5)", "sphinx-rtd-theme"] tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] [[package]] name = "pyopenssl" -version = "23.0.0" +version = "23.1.1" description = "Python wrapper module around the OpenSSL library" category = "main" optional = false python-versions = ">=3.6" files = [ - {file = "pyOpenSSL-23.0.0-py3-none-any.whl", hash = "sha256:df5fc28af899e74e19fccb5510df423581047e10ab6f1f4ba1763ff5fde844c0"}, - {file = "pyOpenSSL-23.0.0.tar.gz", hash = "sha256:c1cc5f86bcacefc84dada7d31175cae1b1518d5f60d3d0bb595a67822a868a6f"}, + {file = "pyOpenSSL-23.1.1-py3-none-any.whl", hash = "sha256:9e0c526404a210df9d2b18cd33364beadb0dc858a739b885677bc65e105d4a4c"}, + {file = 
"pyOpenSSL-23.1.1.tar.gz", hash = "sha256:841498b9bec61623b1b6c47ebbc02367c07d60e0e195f19790817f10cc8db0b7"}, ] [package.dependencies] -cryptography = ">=38.0.0,<40" +cryptography = ">=38.0.0,<41" [package.extras] docs = ["sphinx (!=5.2.0,!=5.2.0.post0)", "sphinx-rtd-theme"] @@ -1759,57 +1968,62 @@ test = ["flaky", "pretend", "pytest (>=3.0.1)"] [[package]] name = "pyrsistent" -version = "0.18.1" +version = "0.19.3" description = "Persistent/Functional/Immutable data structures" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "pyrsistent-0.18.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:df46c854f490f81210870e509818b729db4488e1f30f2a1ce1698b2295a878d1"}, - {file = "pyrsistent-0.18.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d45866ececf4a5fff8742c25722da6d4c9e180daa7b405dc0a2a2790d668c26"}, - {file = "pyrsistent-0.18.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4ed6784ceac462a7d6fcb7e9b663e93b9a6fb373b7f43594f9ff68875788e01e"}, - {file = "pyrsistent-0.18.1-cp310-cp310-win32.whl", hash = "sha256:e4f3149fd5eb9b285d6bfb54d2e5173f6a116fe19172686797c056672689daf6"}, - {file = "pyrsistent-0.18.1-cp310-cp310-win_amd64.whl", hash = "sha256:636ce2dc235046ccd3d8c56a7ad54e99d5c1cd0ef07d9ae847306c91d11b5fec"}, - {file = "pyrsistent-0.18.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e92a52c166426efbe0d1ec1332ee9119b6d32fc1f0bbfd55d5c1088070e7fc1b"}, - {file = "pyrsistent-0.18.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7a096646eab884bf8bed965bad63ea327e0d0c38989fc83c5ea7b8a87037bfc"}, - {file = "pyrsistent-0.18.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cdfd2c361b8a8e5d9499b9082b501c452ade8bbf42aef97ea04854f4a3f43b22"}, - {file = "pyrsistent-0.18.1-cp37-cp37m-win32.whl", hash = "sha256:7ec335fc998faa4febe75cc5268a9eac0478b3f681602c1f27befaf2a1abe1d8"}, - {file = "pyrsistent-0.18.1-cp37-cp37m-win_amd64.whl", hash = "sha256:6455fc599df93d1f60e1c5c4fe471499f08d190d57eca040c0ea182301321286"}, - {file = "pyrsistent-0.18.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:fd8da6d0124efa2f67d86fa70c851022f87c98e205f0594e1fae044e7119a5a6"}, - {file = "pyrsistent-0.18.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bfe2388663fd18bd8ce7db2c91c7400bf3e1a9e8bd7d63bf7e77d39051b85ec"}, - {file = "pyrsistent-0.18.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e3e1fcc45199df76053026a51cc59ab2ea3fc7c094c6627e93b7b44cdae2c8c"}, - {file = "pyrsistent-0.18.1-cp38-cp38-win32.whl", hash = "sha256:b568f35ad53a7b07ed9b1b2bae09eb15cdd671a5ba5d2c66caee40dbf91c68ca"}, - {file = "pyrsistent-0.18.1-cp38-cp38-win_amd64.whl", hash = "sha256:d1b96547410f76078eaf66d282ddca2e4baae8964364abb4f4dcdde855cd123a"}, - {file = "pyrsistent-0.18.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f87cc2863ef33c709e237d4b5f4502a62a00fab450c9e020892e8e2ede5847f5"}, - {file = "pyrsistent-0.18.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bc66318fb7ee012071b2792024564973ecc80e9522842eb4e17743604b5e045"}, - {file = "pyrsistent-0.18.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:914474c9f1d93080338ace89cb2acee74f4f666fb0424896fcfb8d86058bf17c"}, - {file = "pyrsistent-0.18.1-cp39-cp39-win32.whl", hash = 
"sha256:1b34eedd6812bf4d33814fca1b66005805d3640ce53140ab8bbb1e2651b0d9bc"}, - {file = "pyrsistent-0.18.1-cp39-cp39-win_amd64.whl", hash = "sha256:e24a828f57e0c337c8d8bb9f6b12f09dfdf0273da25fda9e314f0b684b415a07"}, - {file = "pyrsistent-0.18.1.tar.gz", hash = "sha256:d4d61f8b993a7255ba714df3aca52700f8125289f84f704cf80916517c46eb96"}, + {file = "pyrsistent-0.19.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:20460ac0ea439a3e79caa1dbd560344b64ed75e85d8703943e0b66c2a6150e4a"}, + {file = "pyrsistent-0.19.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c18264cb84b5e68e7085a43723f9e4c1fd1d935ab240ce02c0324a8e01ccb64"}, + {file = "pyrsistent-0.19.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b774f9288dda8d425adb6544e5903f1fb6c273ab3128a355c6b972b7df39dcf"}, + {file = "pyrsistent-0.19.3-cp310-cp310-win32.whl", hash = "sha256:5a474fb80f5e0d6c9394d8db0fc19e90fa540b82ee52dba7d246a7791712f74a"}, + {file = "pyrsistent-0.19.3-cp310-cp310-win_amd64.whl", hash = "sha256:49c32f216c17148695ca0e02a5c521e28a4ee6c5089f97e34fe24163113722da"}, + {file = "pyrsistent-0.19.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f0774bf48631f3a20471dd7c5989657b639fd2d285b861237ea9e82c36a415a9"}, + {file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ab2204234c0ecd8b9368dbd6a53e83c3d4f3cab10ecaf6d0e772f456c442393"}, + {file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e42296a09e83028b3476f7073fcb69ffebac0e66dbbfd1bd847d61f74db30f19"}, + {file = "pyrsistent-0.19.3-cp311-cp311-win32.whl", hash = "sha256:64220c429e42a7150f4bfd280f6f4bb2850f95956bde93c6fda1b70507af6ef3"}, + {file = "pyrsistent-0.19.3-cp311-cp311-win_amd64.whl", hash = "sha256:016ad1afadf318eb7911baa24b049909f7f3bb2c5b1ed7b6a8f21db21ea3faa8"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c4db1bd596fefd66b296a3d5d943c94f4fac5bcd13e99bffe2ba6a759d959a28"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aeda827381f5e5d65cced3024126529ddc4289d944f75e090572c77ceb19adbf"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:42ac0b2f44607eb92ae88609eda931a4f0dfa03038c44c772e07f43e738bcac9"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-win32.whl", hash = "sha256:e8f2b814a3dc6225964fa03d8582c6e0b6650d68a232df41e3cc1b66a5d2f8d1"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-win_amd64.whl", hash = "sha256:c9bb60a40a0ab9aba40a59f68214eed5a29c6274c83b2cc206a359c4a89fa41b"}, + {file = "pyrsistent-0.19.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a2471f3f8693101975b1ff85ffd19bb7ca7dd7c38f8a81701f67d6b4f97b87d8"}, + {file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc5d149f31706762c1f8bda2e8c4f8fead6e80312e3692619a75301d3dbb819a"}, + {file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3311cb4237a341aa52ab8448c27e3a9931e2ee09561ad150ba94e4cfd3fc888c"}, + {file = "pyrsistent-0.19.3-cp38-cp38-win32.whl", hash = "sha256:f0e7c4b2f77593871e918be000b96c8107da48444d57005b6a6bc61fb4331b2c"}, + {file = "pyrsistent-0.19.3-cp38-cp38-win_amd64.whl", hash = "sha256:c147257a92374fde8498491f53ffa8f4822cd70c0d85037e09028e478cababb7"}, + 
{file = "pyrsistent-0.19.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b735e538f74ec31378f5a1e3886a26d2ca6351106b4dfde376a26fc32a044edc"}, + {file = "pyrsistent-0.19.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99abb85579e2165bd8522f0c0138864da97847875ecbd45f3e7e2af569bfc6f2"}, + {file = "pyrsistent-0.19.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a8cb235fa6d3fd7aae6a4f1429bbb1fec1577d978098da1252f0489937786f3"}, + {file = "pyrsistent-0.19.3-cp39-cp39-win32.whl", hash = "sha256:c74bed51f9b41c48366a286395c67f4e894374306b197e62810e0fdaf2364da2"}, + {file = "pyrsistent-0.19.3-cp39-cp39-win_amd64.whl", hash = "sha256:878433581fc23e906d947a6814336eee031a00e6defba224234169ae3d3d6a98"}, + {file = "pyrsistent-0.19.3-py3-none-any.whl", hash = "sha256:ccf0d6bd208f8111179f0c26fdf84ed7c3891982f2edaeae7422575f47e66b64"}, + {file = "pyrsistent-0.19.3.tar.gz", hash = "sha256:1a2994773706bbb4995c31a97bc94f1418314923bd1048c6d964837040376440"}, ] [[package]] name = "pysaml2" -version = "7.2.1" +version = "7.3.1" description = "Python implementation of SAML Version 2 Standard" category = "main" optional = true -python-versions = "<4,>=3.6" +python-versions = ">=3.6.2,<4.0.0" files = [ - {file = "pysaml2-7.2.1-py2.py3-none-any.whl", hash = "sha256:2ca155f4eeb1471b247a7b0cc79ccfd5780046d33d0b201e1199a00698dce795"}, - {file = "pysaml2-7.2.1.tar.gz", hash = "sha256:f40f9576dce9afef156469179277ffeeca36829248be333252af0517a26d0b1f"}, + {file = "pysaml2-7.3.1-py3-none-any.whl", hash = "sha256:2cc66e7a371d3f5ff9601f0ed93b5276cca816fce82bb38447d5a0651f2f5193"}, + {file = "pysaml2-7.3.1.tar.gz", hash = "sha256:eab22d187c6dd7707c58b5bb1688f9b8e816427667fc99d77f54399e15cd0a0a"}, ] [package.dependencies] cryptography = ">=3.1" defusedxml = "*" +importlib-metadata = {version = ">=1.7.0", markers = "python_version < \"3.8\""} importlib-resources = {version = "*", markers = "python_version < \"3.9\""} -pyOpenSSL = "*" +pyopenssl = "*" python-dateutil = "*" pytz = "*" -requests = ">=1.0.0" -setuptools = "*" -six = "*" +requests = ">=2,<3" xmlschema = ">=1.2.1" [package.extras] @@ -1832,14 +2046,14 @@ six = ">=1.5" [[package]] name = "pytz" -version = "2021.3" +version = "2023.3" description = "World timezone definitions, modern and historical" category = "main" optional = true python-versions = "*" files = [ - {file = "pytz-2021.3-py2.py3-none-any.whl", hash = "sha256:3672058bc3453457b622aab7a1c3bfd5ab0bdae451512f6cf25f64ed37f5b87c"}, - {file = "pytz-2021.3.tar.gz", hash = "sha256:acad2d8b20a1af07d4e4c9d2e9285c5ed9104354062f275f3fcd88dcef4f1326"}, + {file = "pytz-2023.3-py2.py3-none-any.whl", hash = "sha256:a151b3abb88eda1d4e34a9814df37de2a80e301e68ba0fd856fb9b46bfbbbffb"}, + {file = "pytz-2023.3.tar.gz", hash = "sha256:1d8ce29db189191fb55338ee6d0387d82ab59f3d00eac103412d64e0ebd0c588"}, ] [[package]] @@ -1906,14 +2120,14 @@ files = [ [[package]] name = "readme-renderer" -version = "37.2" +version = "37.3" description = "readme_renderer is a library for rendering \"readme\" descriptions for Warehouse" category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "readme_renderer-37.2-py3-none-any.whl", hash = "sha256:d3f06a69e8c40fca9ab3174eca48f96d9771eddb43517b17d96583418427b106"}, - {file = "readme_renderer-37.2.tar.gz", hash = "sha256:e8ad25293c98f781dbc2c5a36a309929390009f902f99e1798c761aaf04a7923"}, + {file = "readme_renderer-37.3-py3-none-any.whl", hash = 
"sha256:f67a16caedfa71eef48a31b39708637a6f4664c4394801a7b0d6432d13907343"}, + {file = "readme_renderer-37.3.tar.gz", hash = "sha256:cd653186dfc73055656f090f227f5cb22a046d7f71a841dfa305f55c9a513273"}, ] [package.dependencies] @@ -1926,36 +2140,36 @@ md = ["cmarkgfm (>=0.8.0)"] [[package]] name = "requests" -version = "2.27.1" +version = "2.28.2" description = "Python HTTP for Humans." category = "main" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +python-versions = ">=3.7, <4" files = [ - {file = "requests-2.27.1-py2.py3-none-any.whl", hash = "sha256:f22fa1e554c9ddfd16e6e41ac79759e17be9e492b3587efa038054674760e72d"}, - {file = "requests-2.27.1.tar.gz", hash = "sha256:68d7c56fd5a8999887728ef304a6d12edc7be74f1cfa47714fc8b414525c9a61"}, + {file = "requests-2.28.2-py3-none-any.whl", hash = "sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa"}, + {file = "requests-2.28.2.tar.gz", hash = "sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf"}, ] [package.dependencies] certifi = ">=2017.4.17" -charset-normalizer = {version = ">=2.0.0,<2.1.0", markers = "python_version >= \"3\""} -idna = {version = ">=2.5,<4", markers = "python_version >= \"3\""} +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" urllib3 = ">=1.21.1,<1.27" [package.extras] -socks = ["PySocks (>=1.5.6,!=1.5.7)", "win-inet-pton"] -use-chardet-on-py3 = ["chardet (>=3.0.2,<5)"] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "requests-toolbelt" -version = "0.9.1" +version = "0.10.1" description = "A utility belt for advanced users of python-requests" category = "dev" optional = false -python-versions = "*" +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ - {file = "requests-toolbelt-0.9.1.tar.gz", hash = "sha256:968089d4584ad4ad7c171454f0a5c6dac23971e9472521ea3b6d49d610aa6fc0"}, - {file = "requests_toolbelt-0.9.1-py2.py3-none-any.whl", hash = "sha256:380606e1d10dc85c3bd47bf5a6095f815ec007be7a8b69c878507068df059e6f"}, + {file = "requests-toolbelt-0.10.1.tar.gz", hash = "sha256:62e09f7ff5ccbda92772a29f394a49c3ad6cb181d568b1337626b2abb628a63d"}, + {file = "requests_toolbelt-0.10.1-py2.py3-none-any.whl", hash = "sha256:18565aa58116d9951ac39baa288d3adb5b3ff975c4f25eee78555d89e8f247f7"}, ] [package.dependencies] @@ -1978,60 +2192,61 @@ idna2008 = ["idna"] [[package]] name = "rich" -version = "12.6.0" +version = "13.3.3" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" category = "dev" optional = false -python-versions = ">=3.6.3,<4.0.0" +python-versions = ">=3.7.0" files = [ - {file = "rich-12.6.0-py3-none-any.whl", hash = "sha256:a4eb26484f2c82589bd9a17c73d32a010b1e29d89f1604cd9bf3a2097b81bb5e"}, - {file = "rich-12.6.0.tar.gz", hash = "sha256:ba3a3775974105c221d31141f2c116f4fd65c5ceb0698657a11e9f295ec93fd0"}, + {file = "rich-13.3.3-py3-none-any.whl", hash = "sha256:540c7d6d26a1178e8e8b37e9ba44573a3cd1464ff6348b99ee7061b95d1c6333"}, + {file = "rich-13.3.3.tar.gz", hash = "sha256:dc84400a9d842b3a9c5ff74addd8eb798d155f36c1c91303888e0a66850d2a15"}, ] [package.dependencies] -commonmark = ">=0.9.0,<0.10.0" -pygments = ">=2.6.0,<3.0.0" +markdown-it-py = ">=2.2.0,<3.0.0" +pygments = ">=2.13.0,<3.0.0" typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""} [package.extras] -jupyter = ["ipywidgets (>=7.5.1,<8.0.0)"] +jupyter = ["ipywidgets (>=7.5.1,<9)"] [[package]] name = "ruff" -version = 
"0.0.230" +version = "0.0.252" description = "An extremely fast Python linter, written in Rust." category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.0.230-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:fcc31d02cebda0a85e2e13a44642aea7f84362cb4f589e2f6b864e3928e4a7db"}, - {file = "ruff-0.0.230-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:45a7f2c7155d520b8ca255a01235763d5c25fd5e7af055e50a78c6d91ece0ced"}, - {file = "ruff-0.0.230-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4eca8b185ab56cac67acc23287c3c8c62a0c0ffadc0787a3bef3a6e77eaed82f"}, - {file = "ruff-0.0.230-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ec2bcdb5040efd8082a3a98369eec4bdc5fd05f53cc6714cb2b725d557d4abe8"}, - {file = "ruff-0.0.230-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:26571aee2b93b60e47e44478f72a9787b387f752e85b85f176739bd91b27cfd1"}, - {file = "ruff-0.0.230-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:4b69c9883c3e264f8bb2d52bdabb88b8d9672750ea05f33e0ff52532824bd5c5"}, - {file = "ruff-0.0.230-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b3dc88b83f200378a9b9c91036989f0285a10759514c42235ce02e5824ac8d0"}, - {file = "ruff-0.0.230-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:767716f008dd3a40ec2318396f648fda437c6968087a4526cde5879e382cf477"}, - {file = "ruff-0.0.230-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac27a0f9b96d9923cef7d911790a21a19b51aec0f08375ccc47ad735b1054d78"}, - {file = "ruff-0.0.230-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:729dfc7b7ad4f7d8761dc60c58f15372d6f5c2dd9b6c5952524f2bc3aec7de6a"}, - {file = "ruff-0.0.230-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ad086cf2e5fef274687121f673f0f9b60c8981ec07c2bb0448c459cbaef81bcb"}, - {file = "ruff-0.0.230-py3-none-musllinux_1_2_i686.whl", hash = "sha256:4feaed0978c24687133cd11c7380de20aa841f893e24430c735cc6c3faba4837"}, - {file = "ruff-0.0.230-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:1d1046d0d43a0f24b2e9e61d76bb201b486ad02e9787d3432af43bd7d16f2c2e"}, - {file = "ruff-0.0.230-py3-none-win32.whl", hash = "sha256:4d627911c9ba57bcd2f2776f1c09a10d334db163cb5be8c892e7ec7b59ccf58c"}, - {file = "ruff-0.0.230-py3-none-win_amd64.whl", hash = "sha256:27fd4891a1d0642f5b2038ebf86f8169bc3d466964bdfaa0ce2a65149bc7cced"}, - {file = "ruff-0.0.230.tar.gz", hash = "sha256:a049f93af1057ac450e8c09559d44e371eda1c151b1b863c0013a1066fefddb0"}, + {file = "ruff-0.0.252-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:349367a227c4db7abbc3a9993efea8a608b5bea4bb4a1e5fc6f0d56819524f92"}, + {file = "ruff-0.0.252-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:ce77f9106d96b4faf7865860fb5155b9deaf6f699d9c279118c5ad947739ecaf"}, + {file = "ruff-0.0.252-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edadb0b050293b4e60dab979ba6a4e734d9c899cbe316a0ee5b65e3cdd39c750"}, + {file = "ruff-0.0.252-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4efdae98937d1e4d23ab0b7fc7e8e6b6836cc7d2d42238ceeacbc793ef780542"}, + {file = "ruff-0.0.252-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c8546d879f7d3f669379a03e7b103d90e11901976ab508aeda59c03dfd8a359e"}, + {file = "ruff-0.0.252-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = 
"sha256:83fdc7169b6c1fb5fe8d1cdf345697f558c1b433ef97df9ca11defa2a8f3ee9e"}, + {file = "ruff-0.0.252-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84ed9be1a17e2a556a571a5b959398633dd10910abd8dcf8b098061e746e892d"}, + {file = "ruff-0.0.252-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f5e77bd9ba4438cf2ee32154e2673afe22f538ef29f5d65ca47e3dc46c42cf8"}, + {file = "ruff-0.0.252-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a5179b94b45c0f8512eaff3ab304c14714a46df2e9ca72a9d96084adc376b71"}, + {file = "ruff-0.0.252-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:92efd8a71157595df5bc46aaaa0613d8a2fbc5cddc53ae7b749c16025c324732"}, + {file = "ruff-0.0.252-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:fd350fc10832cfd28e681d829a8aa83ea3e653326e0ea9d98637dfb8d46177d2"}, + {file = "ruff-0.0.252-py3-none-musllinux_1_2_i686.whl", hash = "sha256:f119240c9631216e846166e06023b1d878e25fbac93bf20da50069e91cfbfaee"}, + {file = "ruff-0.0.252-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5c5a49f89f5ede93d16eddfeeadd7e5739ec703e8f63ac95eac30236b9e49da3"}, + {file = "ruff-0.0.252-py3-none-win32.whl", hash = "sha256:89a897dc743f2fe063483ea666097e72e848f4bbe40493fe0533e61799959f6e"}, + {file = "ruff-0.0.252-py3-none-win_amd64.whl", hash = "sha256:cdc89ad6ff88519b1fb1816ac82a9ad910762c90ff5fd64dda7691b72d36aff7"}, + {file = "ruff-0.0.252-py3-none-win_arm64.whl", hash = "sha256:4b594a17cf53077165429486650658a0e1b2ac6ab88954f5afd50d2b1b5657a9"}, + {file = "ruff-0.0.252.tar.gz", hash = "sha256:6992611ab7bdbe7204e4831c95ddd3febfeece2e6f5e44bbed044454c7db0f63"}, ] [[package]] name = "secretstorage" -version = "3.3.1" +version = "3.3.3" description = "Python bindings to FreeDesktop.org Secret Service API" category = "dev" optional = false python-versions = ">=3.6" files = [ - {file = "SecretStorage-3.3.1-py3-none-any.whl", hash = "sha256:422d82c36172d88d6a0ed5afdec956514b189ddbfb72fefab0c8a1cee4eaf71f"}, - {file = "SecretStorage-3.3.1.tar.gz", hash = "sha256:fd666c51a6bf200643495a04abb261f83229dcb6fd8472ec393df7ffc8b6f195"}, + {file = "SecretStorage-3.3.3-py3-none-any.whl", hash = "sha256:f356e6628222568e3af06f2eba8df495efa13b3b63081dafd4f7d9a7b7bc9f99"}, + {file = "SecretStorage-3.3.3.tar.gz", hash = "sha256:2403533ef369eca6d2ba81718576c5e0f564d5cca1b58f73a8b23e7d4eeebd77"}, ] [package.dependencies] @@ -2056,14 +2271,14 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "1.12.1" +version = "1.19.1" description = "Python client for Sentry (https://sentry.io)" category = "main" optional = true python-versions = "*" files = [ - {file = "sentry-sdk-1.12.1.tar.gz", hash = "sha256:5bbe4b72de22f9ac1e67f2a4e6efe8fbd595bb59b7b223443f50fe5802a5551c"}, - {file = "sentry_sdk-1.12.1-py2.py3-none-any.whl", hash = "sha256:9f0b960694e2d8bb04db4ba6ac2a645040caef4e762c65937998ff06064f10d6"}, + {file = "sentry-sdk-1.19.1.tar.gz", hash = "sha256:7ae78bd921981a5010ab540d6bdf3b793659a4db8cccf7f16180702d48a80d84"}, + {file = "sentry_sdk-1.19.1-py2.py3-none-any.whl", hash = "sha256:885a11c69df23e53eb281d003b9ff15a5bdfa43d8a2a53589be52104a1b4582f"}, ] [package.dependencies] @@ -2072,6 +2287,7 @@ urllib3 = {version = ">=1.26.11", markers = "python_version >= \"3.6\""} [package.extras] aiohttp = ["aiohttp (>=3.5)"] +arq = ["arq (>=0.23)"] beam = ["apache-beam (>=2.12)"] bottle = ["bottle (>=0.12.13)"] celery = ["celery (>=3)"] @@ -2080,8 +2296,10 @@ django = ["django (>=1.8)"] falcon = ["falcon (>=1.4)"] fastapi = ["fastapi 
(>=0.79.0)"] flask = ["blinker (>=1.1)", "flask (>=0.11)"] +grpcio = ["grpcio (>=1.21.1)"] httpx = ["httpx (>=0.16.0)"] -opentelemetry = ["opentelemetry-distro (>=0.350b0)"] +huey = ["huey (>=2)"] +opentelemetry = ["opentelemetry-distro (>=0.35b0)"] pure-eval = ["asttokens", "executing", "pure-eval"] pymongo = ["pymongo (>=3.1)"] pyspark = ["pyspark (>=2.4.4)"] @@ -2090,6 +2308,7 @@ rq = ["rq (>=0.6)"] sanic = ["sanic (>=0.8)"] sqlalchemy = ["sqlalchemy (>=1.2)"] starlette = ["starlette (>=0.19.1)"] +starlite = ["starlite (>=1.48)"] tornado = ["tornado (>=5)"] [[package]] @@ -2119,18 +2338,18 @@ tests = ["coverage[toml] (>=5.0.2)", "pytest"] [[package]] name = "setuptools" -version = "65.5.1" +version = "67.6.1" description = "Easily download, build, install, upgrade, and uninstall Python packages" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "setuptools-65.5.1-py3-none-any.whl", hash = "sha256:d0b9a8433464d5800cbe05094acf5c6d52a91bfac9b52bcfc4d41382be5d5d31"}, - {file = "setuptools-65.5.1.tar.gz", hash = "sha256:e197a19aa8ec9722928f2206f8de752def0e4c9fc6953527360d1c36d94ddb2f"}, + {file = "setuptools-67.6.1-py3-none-any.whl", hash = "sha256:e728ca814a823bf7bf60162daf9db95b93d532948c4c0bea762ce62f60189078"}, + {file = "setuptools-67.6.1.tar.gz", hash = "sha256:257de92a9d50a60b8e22abfcbb771571fde0dbf3ec234463212027a4eeecbe9a"}, ] [package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] @@ -2175,73 +2394,97 @@ dev = ["typing-extensions (>=3.5)"] [[package]] name = "simplejson" -version = "3.17.6" +version = "3.19.1" description = "Simple, fast, extensible JSON encoder/decoder for Python" category = "main" optional = false python-versions = ">=2.5, !=3.0.*, !=3.1.*, !=3.2.*" files = [ - {file = "simplejson-3.17.6-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a89acae02b2975b1f8e4974cb8cdf9bf9f6c91162fb8dec50c259ce700f2770a"}, - {file = "simplejson-3.17.6-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:82ff356ff91be0ab2293fc6d8d262451eb6ac4fd999244c4b5f863e049ba219c"}, - {file = "simplejson-3.17.6-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:0de783e9c2b87bdd75b57efa2b6260c24b94605b5c9843517577d40ee0c3cc8a"}, - {file = "simplejson-3.17.6-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:d24a9e61df7a7787b338a58abfba975414937b609eb6b18973e25f573bc0eeeb"}, - 
{file = "simplejson-3.17.6-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:e8603e691580487f11306ecb066c76f1f4a8b54fb3bdb23fa40643a059509366"}, - {file = "simplejson-3.17.6-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:9b01e7b00654115965a206e3015f0166674ec1e575198a62a977355597c0bef5"}, - {file = "simplejson-3.17.6-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:37bc0cf0e5599f36072077e56e248f3336917ded1d33d2688624d8ed3cefd7d2"}, - {file = "simplejson-3.17.6-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:cf6e7d5fe2aeb54898df18db1baf479863eae581cce05410f61f6b4188c8ada1"}, - {file = "simplejson-3.17.6-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:bdfc54b4468ed4cd7415928cbe782f4d782722a81aeb0f81e2ddca9932632211"}, - {file = "simplejson-3.17.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:dd16302d39c4d6f4afde80edd0c97d4db643327d355a312762ccd9bd2ca515ed"}, - {file = "simplejson-3.17.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:deac4bdafa19bbb89edfb73b19f7f69a52d0b5bd3bb0c4ad404c1bbfd7b4b7fd"}, - {file = "simplejson-3.17.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a8bbdb166e2fb816e43ab034c865147edafe28e1b19c72433147789ac83e2dda"}, - {file = "simplejson-3.17.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7854326920d41c3b5d468154318fe6ba4390cb2410480976787c640707e0180"}, - {file = "simplejson-3.17.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:04e31fa6ac8e326480703fb6ded1488bfa6f1d3f760d32e29dbf66d0838982ce"}, - {file = "simplejson-3.17.6-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f63600ec06982cdf480899026f4fda622776f5fabed9a869fdb32d72bc17e99a"}, - {file = "simplejson-3.17.6-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e03c3b8cc7883a54c3f34a6a135c4a17bc9088a33f36796acdb47162791b02f6"}, - {file = "simplejson-3.17.6-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a2d30d6c1652140181dc6861f564449ad71a45e4f165a6868c27d36745b65d40"}, - {file = "simplejson-3.17.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a1aa6e4cae8e3b8d5321be4f51c5ce77188faf7baa9fe1e78611f93a8eed2882"}, - {file = "simplejson-3.17.6-cp310-cp310-win32.whl", hash = "sha256:97202f939c3ff341fc3fa84d15db86156b1edc669424ba20b0a1fcd4a796a045"}, - {file = "simplejson-3.17.6-cp310-cp310-win_amd64.whl", hash = "sha256:80d3bc9944be1d73e5b1726c3bbfd2628d3d7fe2880711b1eb90b617b9b8ac70"}, - {file = "simplejson-3.17.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9fa621b3c0c05d965882c920347b6593751b7ab20d8fa81e426f1735ca1a9fc7"}, - {file = "simplejson-3.17.6-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd2fb11922f58df8528adfca123f6a84748ad17d066007e7ac977720063556bd"}, - {file = "simplejson-3.17.6-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:724c1fe135aa437d5126138d977004d165a3b5e2ee98fc4eb3e7c0ef645e7e27"}, - {file = "simplejson-3.17.6-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4ff4ac6ff3aa8f814ac0f50bf218a2e1a434a17aafad4f0400a57a8cc62ef17f"}, - {file = "simplejson-3.17.6-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:67093a526e42981fdd954868062e56c9b67fdd7e712616cc3265ad0c210ecb51"}, - {file = "simplejson-3.17.6-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:5d6b4af7ad7e4ac515bc6e602e7b79e2204e25dbd10ab3aa2beef3c5a9cad2c7"}, - {file 
= "simplejson-3.17.6-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:1c9b1ed7ed282b36571638297525f8ef80f34b3e2d600a56f962c6044f24200d"}, - {file = "simplejson-3.17.6-cp36-cp36m-win32.whl", hash = "sha256:632ecbbd2228575e6860c9e49ea3cc5423764d5aa70b92acc4e74096fb434044"}, - {file = "simplejson-3.17.6-cp36-cp36m-win_amd64.whl", hash = "sha256:4c09868ddb86bf79b1feb4e3e7e4a35cd6e61ddb3452b54e20cf296313622566"}, - {file = "simplejson-3.17.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4b6bd8144f15a491c662f06814bd8eaa54b17f26095bb775411f39bacaf66837"}, - {file = "simplejson-3.17.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5decdc78849617917c206b01e9fc1d694fd58caa961be816cb37d3150d613d9a"}, - {file = "simplejson-3.17.6-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:521877c7bd060470806eb6335926e27453d740ac1958eaf0d8c00911bc5e1802"}, - {file = "simplejson-3.17.6-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:65b998193bd7b0c7ecdfffbc825d808eac66279313cb67d8892bb259c9d91494"}, - {file = "simplejson-3.17.6-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:ac786f6cb7aa10d44e9641c7a7d16d7f6e095b138795cd43503769d4154e0dc2"}, - {file = "simplejson-3.17.6-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:3ff5b3464e1ce86a8de8c88e61d4836927d5595c2162cab22e96ff551b916e81"}, - {file = "simplejson-3.17.6-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:69bd56b1d257a91e763256d63606937ae4eb890b18a789b66951c00062afec33"}, - {file = "simplejson-3.17.6-cp37-cp37m-win32.whl", hash = "sha256:b81076552d34c27e5149a40187a8f7e2abb2d3185576a317aaf14aeeedad862a"}, - {file = "simplejson-3.17.6-cp37-cp37m-win_amd64.whl", hash = "sha256:07ecaafc1b1501f275bf5acdee34a4ad33c7c24ede287183ea77a02dc071e0c0"}, - {file = "simplejson-3.17.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:068670af975247acbb9fc3d5393293368cda17026db467bf7a51548ee8f17ee1"}, - {file = "simplejson-3.17.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4d1c135af0c72cb28dd259cf7ba218338f4dc027061262e46fe058b4e6a4c6a3"}, - {file = "simplejson-3.17.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:23fe704da910ff45e72543cbba152821685a889cf00fc58d5c8ee96a9bad5f94"}, - {file = "simplejson-3.17.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f444762fed1bc1fd75187ef14a20ed900c1fbb245d45be9e834b822a0223bc81"}, - {file = "simplejson-3.17.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:681eb4d37c9a9a6eb9b3245a5e89d7f7b2b9895590bb08a20aa598c1eb0a1d9d"}, - {file = "simplejson-3.17.6-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8e8607d8f6b4f9d46fee11447e334d6ab50e993dd4dbfb22f674616ce20907ab"}, - {file = "simplejson-3.17.6-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b10556817f09d46d420edd982dd0653940b90151d0576f09143a8e773459f6fe"}, - {file = "simplejson-3.17.6-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e1ec8a9ee0987d4524ffd6299e778c16cc35fef6d1a2764e609f90962f0b293a"}, - {file = "simplejson-3.17.6-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0b4126cac7d69ac06ff22efd3e0b3328a4a70624fcd6bca4fc1b4e6d9e2e12bf"}, - {file = "simplejson-3.17.6-cp38-cp38-win32.whl", hash = "sha256:35a49ebef25f1ebdef54262e54ae80904d8692367a9f208cdfbc38dbf649e00a"}, - {file = "simplejson-3.17.6-cp38-cp38-win_amd64.whl", hash = 
"sha256:743cd768affaa508a21499f4858c5b824ffa2e1394ed94eb85caf47ac0732198"}, - {file = "simplejson-3.17.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:fb62d517a516128bacf08cb6a86ecd39fb06d08e7c4980251f5d5601d29989ba"}, - {file = "simplejson-3.17.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:12133863178a8080a3dccbf5cb2edfab0001bc41e5d6d2446af2a1131105adfe"}, - {file = "simplejson-3.17.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5540fba2d437edaf4aa4fbb80f43f42a8334206ad1ad3b27aef577fd989f20d9"}, - {file = "simplejson-3.17.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d74ee72b5071818a1a5dab47338e87f08a738cb938a3b0653b9e4d959ddd1fd9"}, - {file = "simplejson-3.17.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:28221620f4dcabdeac310846629b976e599a13f59abb21616356a85231ebd6ad"}, - {file = "simplejson-3.17.6-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b09bc62e5193e31d7f9876220fb429ec13a6a181a24d897b9edfbbdbcd678851"}, - {file = "simplejson-3.17.6-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7255a37ff50593c9b2f1afa8fafd6ef5763213c1ed5a9e2c6f5b9cc925ab979f"}, - {file = "simplejson-3.17.6-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:401d40969cee3df7bda211e57b903a534561b77a7ade0dd622a8d1a31eaa8ba7"}, - {file = "simplejson-3.17.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a649d0f66029c7eb67042b15374bd93a26aae202591d9afd71e111dd0006b198"}, - {file = "simplejson-3.17.6-cp39-cp39-win32.whl", hash = "sha256:522fad7be85de57430d6d287c4b635813932946ebf41b913fe7e880d154ade2e"}, - {file = "simplejson-3.17.6-cp39-cp39-win_amd64.whl", hash = "sha256:3fe87570168b2ae018391e2b43fbf66e8593a86feccb4b0500d134c998983ccc"}, - {file = "simplejson-3.17.6.tar.gz", hash = "sha256:cf98038d2abf63a1ada5730e91e84c642ba6c225b0198c3684151b1f80c5f8a6"}, + {file = "simplejson-3.19.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:412e58997a30c5deb8cab5858b8e2e5b40ca007079f7010ee74565cc13d19665"}, + {file = "simplejson-3.19.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:e765b1f47293dedf77946f0427e03ee45def2862edacd8868c6cf9ab97c8afbd"}, + {file = "simplejson-3.19.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:3231100edee292da78948fa0a77dee4e5a94a0a60bcba9ed7a9dc77f4d4bb11e"}, + {file = "simplejson-3.19.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:081ea6305b3b5e84ae7417e7f45956db5ea3872ec497a584ec86c3260cda049e"}, + {file = "simplejson-3.19.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:f253edf694ce836631b350d758d00a8c4011243d58318fbfbe0dd54a6a839ab4"}, + {file = "simplejson-3.19.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:5db86bb82034e055257c8e45228ca3dbce85e38d7bfa84fa7b2838e032a3219c"}, + {file = "simplejson-3.19.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:69a8b10a4f81548bc1e06ded0c4a6c9042c0be0d947c53c1ed89703f7e613950"}, + {file = "simplejson-3.19.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:58ee5e24d6863b22194020eb62673cf8cc69945fcad6b283919490f6e359f7c5"}, + {file = "simplejson-3.19.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:73d0904c2471f317386d4ae5c665b16b5c50ab4f3ee7fd3d3b7651e564ad74b1"}, + {file = "simplejson-3.19.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:66d780047c31ff316ee305c3f7550f352d87257c756413632303fc59fef19eac"}, + {file = "simplejson-3.19.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:cd4d50a27b065447c9c399f0bf0a993bd0e6308db8bbbfbc3ea03b41c145775a"}, + {file = "simplejson-3.19.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0c16ec6a67a5f66ab004190829eeede01c633936375edcad7cbf06d3241e5865"}, + {file = "simplejson-3.19.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17a963e8dd4d81061cc05b627677c1f6a12e81345111fbdc5708c9f088d752c9"}, + {file = "simplejson-3.19.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7e78d79b10aa92f40f54178ada2b635c960d24fc6141856b926d82f67e56d169"}, + {file = "simplejson-3.19.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ad071cd84a636195f35fa71de2186d717db775f94f985232775794d09f8d9061"}, + {file = "simplejson-3.19.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e7c70f19405e5f99168077b785fe15fcb5f9b3c0b70b0b5c2757ce294922c8c"}, + {file = "simplejson-3.19.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:54fca2b26bcd1c403146fd9461d1da76199442297160721b1d63def2a1b17799"}, + {file = "simplejson-3.19.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:48600a6e0032bed17c20319d91775f1797d39953ccfd68c27f83c8d7fc3b32cb"}, + {file = "simplejson-3.19.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:93f5ac30607157a0b2579af59a065bcfaa7fadeb4875bf927a8f8b6739c8d910"}, + {file = "simplejson-3.19.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b79642a599740603ca86cf9df54f57a2013c47e1dd4dd2ae4769af0a6816900"}, + {file = "simplejson-3.19.1-cp310-cp310-win32.whl", hash = "sha256:d9f2c27f18a0b94107d57294aab3d06d6046ea843ed4a45cae8bd45756749f3a"}, + {file = "simplejson-3.19.1-cp310-cp310-win_amd64.whl", hash = "sha256:5673d27806085d2a413b3be5f85fad6fca4b7ffd31cfe510bbe65eea52fff571"}, + {file = "simplejson-3.19.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:79c748aa61fd8098d0472e776743de20fae2686edb80a24f0f6593a77f74fe86"}, + {file = "simplejson-3.19.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:390f4a8ca61d90bcf806c3ad644e05fa5890f5b9a72abdd4ca8430cdc1e386fa"}, + {file = "simplejson-3.19.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d61482b5d18181e6bb4810b4a6a24c63a490c3a20e9fbd7876639653e2b30a1a"}, + {file = "simplejson-3.19.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2541fdb7467ef9bfad1f55b6c52e8ea52b3ce4a0027d37aff094190a955daa9d"}, + {file = "simplejson-3.19.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46133bc7dd45c9953e6ee4852e3de3d5a9a4a03b068bd238935a5c72f0a1ce34"}, + {file = "simplejson-3.19.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f96def94576f857abf58e031ce881b5a3fc25cbec64b2bc4824824a8a4367af9"}, + {file = "simplejson-3.19.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f14ecca970d825df0d29d5c6736ff27999ee7bdf5510e807f7ad8845f7760ce"}, + {file = "simplejson-3.19.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:66389b6b6ee46a94a493a933a26008a1bae0cfadeca176933e7ff6556c0ce998"}, + {file = "simplejson-3.19.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:22b867205cd258050c2625325fdd9a65f917a5aff22a23387e245ecae4098e78"}, + {file = "simplejson-3.19.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:c39fa911e4302eb79c804b221ddec775c3da08833c0a9120041dd322789824de"}, + {file = 
"simplejson-3.19.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:65dafe413b15e8895ad42e49210b74a955c9ae65564952b0243a18fb35b986cc"}, + {file = "simplejson-3.19.1-cp311-cp311-win32.whl", hash = "sha256:f05d05d99fce5537d8f7a0af6417a9afa9af3a6c4bb1ba7359c53b6257625fcb"}, + {file = "simplejson-3.19.1-cp311-cp311-win_amd64.whl", hash = "sha256:b46aaf0332a8a9c965310058cf3487d705bf672641d2c43a835625b326689cf4"}, + {file = "simplejson-3.19.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b438e5eaa474365f4faaeeef1ec3e8d5b4e7030706e3e3d6b5bee6049732e0e6"}, + {file = "simplejson-3.19.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa9d614a612ad02492f704fbac636f666fa89295a5d22b4facf2d665fc3b5ea9"}, + {file = "simplejson-3.19.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46e89f58e4bed107626edce1cf098da3664a336d01fc78fddcfb1f397f553d44"}, + {file = "simplejson-3.19.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96ade243fb6f3b57e7bd3b71e90c190cd0f93ec5dce6bf38734a73a2e5fa274f"}, + {file = "simplejson-3.19.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed18728b90758d171f0c66c475c24a443ede815cf3f1a91e907b0db0ebc6e508"}, + {file = "simplejson-3.19.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:6a561320485017ddfc21bd2ed5de2d70184f754f1c9b1947c55f8e2b0163a268"}, + {file = "simplejson-3.19.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:2098811cd241429c08b7fc5c9e41fcc3f59f27c2e8d1da2ccdcf6c8e340ab507"}, + {file = "simplejson-3.19.1-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:8f8d179393e6f0cf6c7c950576892ea6acbcea0a320838c61968ac7046f59228"}, + {file = "simplejson-3.19.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:eff87c68058374e45225089e4538c26329a13499bc0104b52b77f8428eed36b2"}, + {file = "simplejson-3.19.1-cp36-cp36m-win32.whl", hash = "sha256:d300773b93eed82f6da138fd1d081dc96fbe53d96000a85e41460fe07c8d8b33"}, + {file = "simplejson-3.19.1-cp36-cp36m-win_amd64.whl", hash = "sha256:37724c634f93e5caaca04458f267836eb9505d897ab3947b52f33b191bf344f3"}, + {file = "simplejson-3.19.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:74bf802debe68627227ddb665c067eb8c73aa68b2476369237adf55c1161b728"}, + {file = "simplejson-3.19.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70128fb92932524c89f373e17221cf9535d7d0c63794955cc3cd5868e19f5d38"}, + {file = "simplejson-3.19.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8090e75653ea7db75bc21fa5f7bcf5f7bdf64ea258cbbac45c7065f6324f1b50"}, + {file = "simplejson-3.19.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a755f7bfc8adcb94887710dc70cc12a69a454120c6adcc6f251c3f7b46ee6aac"}, + {file = "simplejson-3.19.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ccb2c1877bc9b25bc4f4687169caa925ffda605d7569c40e8e95186e9a5e58b"}, + {file = "simplejson-3.19.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:919bc5aa4d8094cf8f1371ea9119e5d952f741dc4162810ab714aec948a23fe5"}, + {file = "simplejson-3.19.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:e333c5b62e93949f5ac27e6758ba53ef6ee4f93e36cc977fe2e3df85c02f6dc4"}, + {file = "simplejson-3.19.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = 
"sha256:3a4480e348000d89cf501b5606415f4d328484bbb431146c2971123d49fd8430"}, + {file = "simplejson-3.19.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:cb502cde018e93e75dc8fc7bb2d93477ce4f3ac10369f48866c61b5e031db1fd"}, + {file = "simplejson-3.19.1-cp37-cp37m-win32.whl", hash = "sha256:f41915a4e1f059dfad614b187bc06021fefb5fc5255bfe63abf8247d2f7a646a"}, + {file = "simplejson-3.19.1-cp37-cp37m-win_amd64.whl", hash = "sha256:3844305bc33d52c4975da07f75b480e17af3558c0d13085eaa6cc2f32882ccf7"}, + {file = "simplejson-3.19.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:1cb19eacb77adc5a9720244d8d0b5507421d117c7ed4f2f9461424a1829e0ceb"}, + {file = "simplejson-3.19.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:926957b278de22797bfc2f004b15297013843b595b3cd7ecd9e37ccb5fad0b72"}, + {file = "simplejson-3.19.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b0e9a5e66969f7a47dc500e3dba8edc3b45d4eb31efb855c8647700a3493dd8a"}, + {file = "simplejson-3.19.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79d46e7e33c3a4ef853a1307b2032cfb7220e1a079d0c65488fbd7118f44935a"}, + {file = "simplejson-3.19.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:344a5093b71c1b370968d0fbd14d55c9413cb6f0355fdefeb4a322d602d21776"}, + {file = "simplejson-3.19.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:23fbb7b46d44ed7cbcda689295862851105c7594ae5875dce2a70eeaa498ff86"}, + {file = "simplejson-3.19.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d3025e7e9ddb48813aec2974e1a7e68e63eac911dd5e0a9568775de107ac79a"}, + {file = "simplejson-3.19.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:87b190e6ceec286219bd6b6f13547ca433f977d4600b4e81739e9ac23b5b9ba9"}, + {file = "simplejson-3.19.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:dc935d8322ba9bc7b84f99f40f111809b0473df167bf5b93b89fb719d2c4892b"}, + {file = "simplejson-3.19.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:3b652579c21af73879d99c8072c31476788c8c26b5565687fd9db154070d852a"}, + {file = "simplejson-3.19.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6aa7ca03f25b23b01629b1c7f78e1cd826a66bfb8809f8977a3635be2ec48f1a"}, + {file = "simplejson-3.19.1-cp38-cp38-win32.whl", hash = "sha256:08be5a241fdf67a8e05ac7edbd49b07b638ebe4846b560673e196b2a25c94b92"}, + {file = "simplejson-3.19.1-cp38-cp38-win_amd64.whl", hash = "sha256:ca56a6c8c8236d6fe19abb67ef08d76f3c3f46712c49a3b6a5352b6e43e8855f"}, + {file = "simplejson-3.19.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:6424d8229ba62e5dbbc377908cfee9b2edf25abd63b855c21f12ac596cd18e41"}, + {file = "simplejson-3.19.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:547ea86ca408a6735335c881a2e6208851027f5bfd678d8f2c92a0f02c7e7330"}, + {file = "simplejson-3.19.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:889328873c35cb0b2b4c83cbb83ec52efee5a05e75002e2c0c46c4e42790e83c"}, + {file = "simplejson-3.19.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44cdb4e544134f305b033ad79ae5c6b9a32e7c58b46d9f55a64e2a883fbbba01"}, + {file = "simplejson-3.19.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc2b3f06430cbd4fac0dae5b2974d2bf14f71b415fb6de017f498950da8159b1"}, + {file = "simplejson-3.19.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:d125e754d26c0298715bdc3f8a03a0658ecbe72330be247f4b328d229d8cf67f"}, + {file = "simplejson-3.19.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:476c8033abed7b1fd8db62a7600bf18501ce701c1a71179e4ce04ac92c1c5c3c"}, + {file = "simplejson-3.19.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:199a0bcd792811c252d71e3eabb3d4a132b3e85e43ebd93bfd053d5b59a7e78b"}, + {file = "simplejson-3.19.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a79b439a6a77649bb8e2f2644e6c9cc0adb720fc55bed63546edea86e1d5c6c8"}, + {file = "simplejson-3.19.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:203412745fed916fc04566ecef3f2b6c872b52f1e7fb3a6a84451b800fb508c1"}, + {file = "simplejson-3.19.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5ca922c61d87b4c38f37aa706520328ffe22d7ac1553ef1cadc73f053a673553"}, + {file = "simplejson-3.19.1-cp39-cp39-win32.whl", hash = "sha256:3e0902c278243d6f7223ba3e6c5738614c971fd9a887fff8feaa8dcf7249c8d4"}, + {file = "simplejson-3.19.1-cp39-cp39-win_amd64.whl", hash = "sha256:d396b610e77b0c438846607cd56418bfc194973b9886550a98fd6724e8c6cfec"}, + {file = "simplejson-3.19.1-py3-none-any.whl", hash = "sha256:4710806eb75e87919b858af0cba4ffedc01b463edc3982ded7b55143f39e41e1"}, + {file = "simplejson-3.19.1.tar.gz", hash = "sha256:6277f60848a7d8319d27d2be767a7546bc965535b28070e310b3a9af90604a4c"}, ] [[package]] @@ -2282,13 +2525,13 @@ files = [ [[package]] name = "systemd-python" -version = "234" +version = "235" description = "Python interface for libsystemd" category = "main" optional = true python-versions = "*" files = [ - {file = "systemd-python-234.tar.gz", hash = "sha256:fd0e44bf70eadae45aadc292cb0a7eb5b0b6372cd1b391228047d33895db83e7"}, + {file = "systemd-python-235.tar.gz", hash = "sha256:4e57f39797fd5d9e2d22b8806a252d7c0106c936039d1e71c8c6b8008e695c0a"}, ] [[package]] @@ -2308,13 +2551,13 @@ tornado = "*" [[package]] name = "thrift" -version = "0.15.0" +version = "0.16.0" description = "Python bindings for the Apache Thrift RPC system" category = "main" optional = true python-versions = "*" files = [ - {file = "thrift-0.15.0.tar.gz", hash = "sha256:87c8205a71cf8bbb111cb99b1f7495070fbc9cabb671669568854210da5b3e29"}, + {file = "thrift-0.16.0.tar.gz", hash = "sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"}, ] [package.dependencies] @@ -2327,65 +2570,35 @@ twisted = ["twisted"] [[package]] name = "tomli" -version = "1.2.3" +version = "2.0.1" description = "A lil' TOML parser" category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "tomli-1.2.3-py3-none-any.whl", hash = "sha256:e3069e4be3ead9668e21cb9b074cd948f7b3113fd9c8bba083f48247aab8b11c"}, - {file = "tomli-1.2.3.tar.gz", hash = "sha256:05b6166bff487dc068d322585c7ea4ef78deed501cc124060e0f238e89a9231f"}, + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, ] [[package]] name = "tornado" -version = "6.1" +version = "6.2" description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
category = "main" optional = true -python-versions = ">= 3.5" -files = [ - {file = "tornado-6.1-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:d371e811d6b156d82aa5f9a4e08b58debf97c302a35714f6f45e35139c332e32"}, - {file = "tornado-6.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:0d321a39c36e5f2c4ff12b4ed58d41390460f798422c4504e09eb5678e09998c"}, - {file = "tornado-6.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9de9e5188a782be6b1ce866e8a51bc76a0fbaa0e16613823fc38e4fc2556ad05"}, - {file = "tornado-6.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:61b32d06ae8a036a6607805e6720ef00a3c98207038444ba7fd3d169cd998910"}, - {file = "tornado-6.1-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:3e63498f680547ed24d2c71e6497f24bca791aca2fe116dbc2bd0ac7f191691b"}, - {file = "tornado-6.1-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:6c77c9937962577a6a76917845d06af6ab9197702a42e1346d8ae2e76b5e3675"}, - {file = "tornado-6.1-cp35-cp35m-win32.whl", hash = "sha256:6286efab1ed6e74b7028327365cf7346b1d777d63ab30e21a0f4d5b275fc17d5"}, - {file = "tornado-6.1-cp35-cp35m-win_amd64.whl", hash = "sha256:fa2ba70284fa42c2a5ecb35e322e68823288a4251f9ba9cc77be04ae15eada68"}, - {file = "tornado-6.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:0a00ff4561e2929a2c37ce706cb8233b7907e0cdc22eab98888aca5dd3775feb"}, - {file = "tornado-6.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:748290bf9112b581c525e6e6d3820621ff020ed95af6f17fedef416b27ed564c"}, - {file = "tornado-6.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:e385b637ac3acaae8022e7e47dfa7b83d3620e432e3ecb9a3f7f58f150e50921"}, - {file = "tornado-6.1-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:25ad220258349a12ae87ede08a7b04aca51237721f63b1808d39bdb4b2164558"}, - {file = "tornado-6.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:65d98939f1a2e74b58839f8c4dab3b6b3c1ce84972ae712be02845e65391ac7c"}, - {file = "tornado-6.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:e519d64089b0876c7b467274468709dadf11e41d65f63bba207e04217f47c085"}, - {file = "tornado-6.1-cp36-cp36m-win32.whl", hash = "sha256:b87936fd2c317b6ee08a5741ea06b9d11a6074ef4cc42e031bc6403f82a32575"}, - {file = "tornado-6.1-cp36-cp36m-win_amd64.whl", hash = "sha256:cc0ee35043162abbf717b7df924597ade8e5395e7b66d18270116f8745ceb795"}, - {file = "tornado-6.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7250a3fa399f08ec9cb3f7b1b987955d17e044f1ade821b32e5f435130250d7f"}, - {file = "tornado-6.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:ed3ad863b1b40cd1d4bd21e7498329ccaece75db5a5bf58cd3c9f130843e7102"}, - {file = "tornado-6.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:dcef026f608f678c118779cd6591c8af6e9b4155c44e0d1bc0c87c036fb8c8c4"}, - {file = "tornado-6.1-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:70dec29e8ac485dbf57481baee40781c63e381bebea080991893cd297742b8fd"}, - {file = "tornado-6.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:d3f7594930c423fd9f5d1a76bee85a2c36fd8b4b16921cae7e965f22575e9c01"}, - {file = "tornado-6.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:3447475585bae2e77ecb832fc0300c3695516a47d46cefa0528181a34c5b9d3d"}, - {file = "tornado-6.1-cp37-cp37m-win32.whl", hash = "sha256:e7229e60ac41a1202444497ddde70a48d33909e484f96eb0da9baf8dc68541df"}, - {file = "tornado-6.1-cp37-cp37m-win_amd64.whl", hash = "sha256:cb5ec8eead331e3bb4ce8066cf06d2dfef1bfb1b2a73082dfe8a161301b76e37"}, - {file = "tornado-6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:20241b3cb4f425e971cb0a8e4ffc9b0a861530ae3c52f2b0434e6c1b57e9fd95"}, - {file = "tornado-6.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:c77da1263aa361938476f04c4b6c8916001b90b2c2fdd92d8d535e1af48fba5a"}, - {file = "tornado-6.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:fba85b6cd9c39be262fcd23865652920832b61583de2a2ca907dbd8e8a8c81e5"}, - {file = "tornado-6.1-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:1e8225a1070cd8eec59a996c43229fe8f95689cb16e552d130b9793cb570a288"}, - {file = "tornado-6.1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:d14d30e7f46a0476efb0deb5b61343b1526f73ebb5ed84f23dc794bdb88f9d9f"}, - {file = "tornado-6.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:8f959b26f2634a091bb42241c3ed8d3cedb506e7c27b8dd5c7b9f745318ddbb6"}, - {file = "tornado-6.1-cp38-cp38-win32.whl", hash = "sha256:34ca2dac9e4d7afb0bed4677512e36a52f09caa6fded70b4e3e1c89dbd92c326"}, - {file = "tornado-6.1-cp38-cp38-win_amd64.whl", hash = "sha256:6196a5c39286cc37c024cd78834fb9345e464525d8991c21e908cc046d1cc02c"}, - {file = "tornado-6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f0ba29bafd8e7e22920567ce0d232c26d4d47c8b5cf4ed7b562b5db39fa199c5"}, - {file = "tornado-6.1-cp39-cp39-manylinux1_i686.whl", hash = "sha256:33892118b165401f291070100d6d09359ca74addda679b60390b09f8ef325ffe"}, - {file = "tornado-6.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:7da13da6f985aab7f6f28debab00c67ff9cbacd588e8477034c0652ac141feea"}, - {file = "tornado-6.1-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:e0791ac58d91ac58f694d8d2957884df8e4e2f6687cdf367ef7eb7497f79eaa2"}, - {file = "tornado-6.1-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:66324e4e1beede9ac79e60f88de548da58b1f8ab4b2f1354d8375774f997e6c0"}, - {file = "tornado-6.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:a48900ecea1cbb71b8c71c620dee15b62f85f7c14189bdeee54966fbd9a0c5bd"}, - {file = "tornado-6.1-cp39-cp39-win32.whl", hash = "sha256:d3d20ea5782ba63ed13bc2b8c291a053c8d807a8fa927d941bd718468f7b950c"}, - {file = "tornado-6.1-cp39-cp39-win_amd64.whl", hash = "sha256:548430be2740e327b3fe0201abe471f314741efcb0067ec4f2d7dcfb4825f3e4"}, - {file = "tornado-6.1.tar.gz", hash = "sha256:33c6e81d7bd55b468d2e793517c909b139960b6c790a60b7991b9b6b76fb9791"}, +python-versions = ">= 3.7" +files = [ + {file = "tornado-6.2-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:20f638fd8cc85f3cbae3c732326e96addff0a15e22d80f049e00121651e82e72"}, + {file = "tornado-6.2-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:87dcafae3e884462f90c90ecc200defe5e580a7fbbb4365eda7c7c1eb809ebc9"}, + {file = "tornado-6.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba09ef14ca9893954244fd872798b4ccb2367c165946ce2dd7376aebdde8e3ac"}, + {file = "tornado-6.2-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8150f721c101abdef99073bf66d3903e292d851bee51910839831caba341a75"}, + {file = "tornado-6.2-cp37-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3a2f5999215a3a06a4fc218026cd84c61b8b2b40ac5296a6db1f1451ef04c1e"}, + {file = "tornado-6.2-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:5f8c52d219d4995388119af7ccaa0bcec289535747620116a58d830e7c25d8a8"}, + {file = "tornado-6.2-cp37-abi3-musllinux_1_1_i686.whl", hash = "sha256:6fdfabffd8dfcb6cf887428849d30cf19a3ea34c2c248461e1f7d718ad30b66b"}, + {file = "tornado-6.2-cp37-abi3-musllinux_1_1_x86_64.whl", hash = 
"sha256:1d54d13ab8414ed44de07efecb97d4ef7c39f7438cf5e976ccd356bebb1b5fca"}, + {file = "tornado-6.2-cp37-abi3-win32.whl", hash = "sha256:5c87076709343557ef8032934ce5f637dbb552efa7b21d08e89ae7619ed0eb23"}, + {file = "tornado-6.2-cp37-abi3-win_amd64.whl", hash = "sha256:e5f923aa6a47e133d1cf87d60700889d7eae68988704e20c75fb2d65677a8e4b"}, + {file = "tornado-6.2.tar.gz", hash = "sha256:9b630419bde84ec666bfd7ea0a4cb2a8a651c2d5cccdbdd1972a0c859dfc3c13"}, ] [[package]] @@ -2501,36 +2714,40 @@ windows-platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0. [[package]] name = "twisted-iocpsupport" -version = "1.0.2" +version = "1.0.3" description = "An extension for use in the twisted I/O Completion Ports reactor." category = "main" optional = false python-versions = "*" files = [ - {file = "twisted-iocpsupport-1.0.2.tar.gz", hash = "sha256:72068b206ee809c9c596b57b5287259ea41ddb4774d86725b19f35bf56aa32a9"}, - {file = "twisted_iocpsupport-1.0.2-cp310-cp310-win32.whl", hash = "sha256:985c06a33f5c0dae92c71a036d1ea63872ee86a21dd9b01e1f287486f15524b4"}, - {file = "twisted_iocpsupport-1.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:81b3abe3527b367da0220482820cb12a16c661672b7bcfcde328902890d63323"}, - {file = "twisted_iocpsupport-1.0.2-cp36-cp36m-win32.whl", hash = "sha256:9dbb8823b49f06d4de52721b47de4d3b3026064ef4788ce62b1a21c57c3fff6f"}, - {file = "twisted_iocpsupport-1.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:b9fed67cf0f951573f06d560ac2f10f2a4bbdc6697770113a2fc396ea2cb2565"}, - {file = "twisted_iocpsupport-1.0.2-cp37-cp37m-win32.whl", hash = "sha256:b76b4eed9b27fd63ddb0877efdd2d15835fdcb6baa745cb85b66e5d016ac2878"}, - {file = "twisted_iocpsupport-1.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:851b3735ca7e8102e661872390e3bce88f8901bece95c25a0c8bb9ecb8a23d32"}, - {file = "twisted_iocpsupport-1.0.2-cp38-cp38-win32.whl", hash = "sha256:bf4133139d77fc706d8f572e6b7d82871d82ec7ef25d685c2351bdacfb701415"}, - {file = "twisted_iocpsupport-1.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:306becd6e22ab6e8e4f36b6bdafd9c92e867c98a5ce517b27fdd27760ee7ae41"}, - {file = "twisted_iocpsupport-1.0.2-cp39-cp39-win32.whl", hash = "sha256:3c61742cb0bc6c1ac117a7e5f422c129832f0c295af49e01d8a6066df8cfc04d"}, - {file = "twisted_iocpsupport-1.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:b435857b9efcbfc12f8c326ef0383f26416272260455bbca2cd8d8eca470c546"}, - {file = "twisted_iocpsupport-1.0.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:7d972cfa8439bdcb35a7be78b7ef86d73b34b808c74be56dfa785c8a93b851bf"}, + {file = "twisted-iocpsupport-1.0.3.tar.gz", hash = "sha256:afb00801fdfbaccf0d0173a722626500023d4a19719ac9f129d1347a32e2fc66"}, + {file = "twisted_iocpsupport-1.0.3-cp310-cp310-win32.whl", hash = "sha256:a379ef56a576c8090889f74441bc3822ca31ac82253cc61e8d50631bcb0c26d0"}, + {file = "twisted_iocpsupport-1.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:1ea2c3fbdb739c95cc8b3355305cd593d2c9ec56d709207aa1a05d4d98671e85"}, + {file = "twisted_iocpsupport-1.0.3-cp311-cp311-win32.whl", hash = "sha256:7efcdfafb377f32db90f42bd5fc5bb32cd1e3637ee936cdaf3aff4f4786ab3bf"}, + {file = "twisted_iocpsupport-1.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:1dbfac706972bf9ec5ce1ddbc735d2ebba406ad363345df8751ffd5252aa1618"}, + {file = "twisted_iocpsupport-1.0.3-cp36-cp36m-win32.whl", hash = "sha256:1ddfc5fa22ec6f913464b736b3f46e642237f17ac41be47eed6fa9bd52f5d0e0"}, + {file = "twisted_iocpsupport-1.0.3-cp36-cp36m-win_amd64.whl", hash = "sha256:1bdccbb22199fc69fd7744d6d2dfd22d073c028c8611d994b41d2d2ad0e0f40d"}, + {file = 
"twisted_iocpsupport-1.0.3-cp37-cp37m-win32.whl", hash = "sha256:db11c80054b52dbdea44d63d5474a44c9a6531882f0e2960268b15123088641a"}, + {file = "twisted_iocpsupport-1.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:67bec1716eb8f466ef366bbf262e1467ecc9e20940111207663ac24049785bad"}, + {file = "twisted_iocpsupport-1.0.3-cp38-cp38-win32.whl", hash = "sha256:98a6f16ab215f8c1446e9fc60aaed0ab7c746d566aa2f3492a23cea334e6bebb"}, + {file = "twisted_iocpsupport-1.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:4f249d0baac836bb431d6fa0178be063a310136bc489465a831e3abd2d7acafd"}, + {file = "twisted_iocpsupport-1.0.3-cp39-cp39-win32.whl", hash = "sha256:aaca8f30c3b7c80d27a33fe9fe0d0bac42b1b012ddc60f677175c30e1becc1f3"}, + {file = "twisted_iocpsupport-1.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:dff43136c33665c2d117a73706aef6f7d6433e5c4560332a118fe066b16b8695"}, + {file = "twisted_iocpsupport-1.0.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:8faceae553cfadc42ad791b1790e7cdecb7751102608c405217f6a26e877e0c5"}, + {file = "twisted_iocpsupport-1.0.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:6f8c433faaad5d53d30d1da6968d5a3730df415e2efb6864847267a9b51290cd"}, + {file = "twisted_iocpsupport-1.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3f39c41c0213a81a9ce0961e30d0d7650f371ad80f8d261007d15a2deb6d5be3"}, ] [[package]] name = "txredisapi" -version = "1.4.7" +version = "1.4.9" description = "non-blocking redis client for python" category = "main" optional = true python-versions = "*" files = [ - {file = "txredisapi-1.4.7-py3-none-any.whl", hash = "sha256:34c9eba8d34f452d30661f073b67b8cd42b695e3d31678ec1bbf628a65a0f059"}, - {file = "txredisapi-1.4.7.tar.gz", hash = "sha256:e6cc43f51e35d608abdca8f8c7d20e148fe1d82679f6e584baea613ebec812bb"}, + {file = "txredisapi-1.4.9-py3-none-any.whl", hash = "sha256:72e6ad09cc5fffe3bec2e55e5bfb74407bd357565fc212e6003f7e26ef7d8f78"}, + {file = "txredisapi-1.4.9.tar.gz", hash = "sha256:c9607062d05e4d0b8ef84719eb76a3fe7d5ccd606a2acf024429da51d6e84559"}, ] [package.dependencies] @@ -2539,199 +2756,159 @@ twisted = "*" [[package]] name = "typed-ast" -version = "1.5.2" +version = "1.5.4" description = "a fork of Python 2 and 3 ast modules with type comment support" category = "dev" optional = false python-versions = ">=3.6" files = [ - {file = "typed_ast-1.5.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:183b183b7771a508395d2cbffd6db67d6ad52958a5fdc99f450d954003900266"}, - {file = "typed_ast-1.5.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:676d051b1da67a852c0447621fdd11c4e104827417bf216092ec3e286f7da596"}, - {file = "typed_ast-1.5.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc2542e83ac8399752bc16e0b35e038bdb659ba237f4222616b4e83fb9654985"}, - {file = "typed_ast-1.5.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:74cac86cc586db8dfda0ce65d8bcd2bf17b58668dfcc3652762f3ef0e6677e76"}, - {file = "typed_ast-1.5.2-cp310-cp310-win_amd64.whl", hash = "sha256:18fe320f354d6f9ad3147859b6e16649a0781425268c4dde596093177660e71a"}, - {file = "typed_ast-1.5.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:31d8c6b2df19a777bc8826770b872a45a1f30cfefcfd729491baa5237faae837"}, - {file = "typed_ast-1.5.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:963a0ccc9a4188524e6e6d39b12c9ca24cc2d45a71cfdd04a26d883c922b4b78"}, - {file = "typed_ast-1.5.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = 
"sha256:0eb77764ea470f14fcbb89d51bc6bbf5e7623446ac4ed06cbd9ca9495b62e36e"}, - {file = "typed_ast-1.5.2-cp36-cp36m-win_amd64.whl", hash = "sha256:294a6903a4d087db805a7656989f613371915fc45c8cc0ddc5c5a0a8ad9bea4d"}, - {file = "typed_ast-1.5.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:26a432dc219c6b6f38be20a958cbe1abffcc5492821d7e27f08606ef99e0dffd"}, - {file = "typed_ast-1.5.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7407cfcad702f0b6c0e0f3e7ab876cd1d2c13b14ce770e412c0c4b9728a0f88"}, - {file = "typed_ast-1.5.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f30ddd110634c2d7534b2d4e0e22967e88366b0d356b24de87419cc4410c41b7"}, - {file = "typed_ast-1.5.2-cp37-cp37m-win_amd64.whl", hash = "sha256:8c08d6625bb258179b6e512f55ad20f9dfef019bbfbe3095247401e053a3ea30"}, - {file = "typed_ast-1.5.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:90904d889ab8e81a956f2c0935a523cc4e077c7847a836abee832f868d5c26a4"}, - {file = "typed_ast-1.5.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bbebc31bf11762b63bf61aaae232becb41c5bf6b3461b80a4df7e791fabb3aca"}, - {file = "typed_ast-1.5.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c29dd9a3a9d259c9fa19d19738d021632d673f6ed9b35a739f48e5f807f264fb"}, - {file = "typed_ast-1.5.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:58ae097a325e9bb7a684572d20eb3e1809802c5c9ec7108e85da1eb6c1a3331b"}, - {file = "typed_ast-1.5.2-cp38-cp38-win_amd64.whl", hash = "sha256:da0a98d458010bf4fe535f2d1e367a2e2060e105978873c04c04212fb20543f7"}, - {file = "typed_ast-1.5.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:33b4a19ddc9fc551ebabca9765d54d04600c4a50eda13893dadf67ed81d9a098"}, - {file = "typed_ast-1.5.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1098df9a0592dd4c8c0ccfc2e98931278a6c6c53cb3a3e2cf7e9ee3b06153344"}, - {file = "typed_ast-1.5.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42c47c3b43fe3a39ddf8de1d40dbbfca60ac8530a36c9b198ea5b9efac75c09e"}, - {file = "typed_ast-1.5.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f290617f74a610849bd8f5514e34ae3d09eafd521dceaa6cf68b3f4414266d4e"}, - {file = "typed_ast-1.5.2-cp39-cp39-win_amd64.whl", hash = "sha256:df05aa5b241e2e8045f5f4367a9f6187b09c4cdf8578bb219861c4e27c443db5"}, - {file = "typed_ast-1.5.2.tar.gz", hash = "sha256:525a2d4088e70a9f75b08b3f87a51acc9cde640e19cc523c7e41aa355564ae27"}, + {file = "typed_ast-1.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:669dd0c4167f6f2cd9f57041e03c3c2ebf9063d0757dc89f79ba1daa2bfca9d4"}, + {file = "typed_ast-1.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:211260621ab1cd7324e0798d6be953d00b74e0428382991adfddb352252f1d62"}, + {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:267e3f78697a6c00c689c03db4876dd1efdfea2f251a5ad6555e82a26847b4ac"}, + {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c542eeda69212fa10a7ada75e668876fdec5f856cd3d06829e6aa64ad17c8dfe"}, + {file = "typed_ast-1.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:a9916d2bb8865f973824fb47436fa45e1ebf2efd920f2b9f99342cb7fab93f72"}, + {file = "typed_ast-1.5.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = 
"sha256:79b1e0869db7c830ba6a981d58711c88b6677506e648496b1f64ac7d15633aec"}, + {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a94d55d142c9265f4ea46fab70977a1944ecae359ae867397757d836ea5a3f47"}, + {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:183afdf0ec5b1b211724dfef3d2cad2d767cbefac291f24d69b00546c1837fb6"}, + {file = "typed_ast-1.5.4-cp36-cp36m-win_amd64.whl", hash = "sha256:639c5f0b21776605dd6c9dbe592d5228f021404dafd377e2b7ac046b0349b1a1"}, + {file = "typed_ast-1.5.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cf4afcfac006ece570e32d6fa90ab74a17245b83dfd6655a6f68568098345ff6"}, + {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed855bbe3eb3715fca349c80174cfcfd699c2f9de574d40527b8429acae23a66"}, + {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6778e1b2f81dfc7bc58e4b259363b83d2e509a65198e85d5700dfae4c6c8ff1c"}, + {file = "typed_ast-1.5.4-cp37-cp37m-win_amd64.whl", hash = "sha256:0261195c2062caf107831e92a76764c81227dae162c4f75192c0d489faf751a2"}, + {file = "typed_ast-1.5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2efae9db7a8c05ad5547d522e7dbe62c83d838d3906a3716d1478b6c1d61388d"}, + {file = "typed_ast-1.5.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7d5d014b7daa8b0bf2eaef684295acae12b036d79f54178b92a2b6a56f92278f"}, + {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:370788a63915e82fd6f212865a596a0fefcbb7d408bbbb13dea723d971ed8bdc"}, + {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4e964b4ff86550a7a7d56345c7864b18f403f5bd7380edf44a3c1fb4ee7ac6c6"}, + {file = "typed_ast-1.5.4-cp38-cp38-win_amd64.whl", hash = "sha256:683407d92dc953c8a7347119596f0b0e6c55eb98ebebd9b23437501b28dcbb8e"}, + {file = "typed_ast-1.5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4879da6c9b73443f97e731b617184a596ac1235fe91f98d279a7af36c796da35"}, + {file = "typed_ast-1.5.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3e123d878ba170397916557d31c8f589951e353cc95fb7f24f6bb69adc1a8a97"}, + {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebd9d7f80ccf7a82ac5f88c521115cc55d84e35bf8b446fcd7836eb6b98929a3"}, + {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98f80dee3c03455e92796b58b98ff6ca0b2a6f652120c263efdba4d6c5e58f72"}, + {file = "typed_ast-1.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:0fdbcf2fef0ca421a3f5912555804296f0b0960f0418c440f5d6d3abb549f3e1"}, + {file = "typed_ast-1.5.4.tar.gz", hash = "sha256:39e21ceb7388e4bb37f4c679d72707ed46c2fbf2a5609b8b8ebc4b067d977df2"}, ] [[package]] name = "types-bleach" -version = "5.0.3.1" +version = "6.0.0.2" description = "Typing stubs for bleach" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-bleach-5.0.3.1.tar.gz", hash = "sha256:ce8772ea5126dab1883851b41e3aeff229aa5213ced36096990344e632e92373"}, - {file = "types_bleach-5.0.3.1-py3-none-any.whl", hash = "sha256:af5f1b3a54ff279f54c29eccb2e6988ebb6718bc4061469588a5fd4880a79287"}, + {file = "types-bleach-6.0.0.2.tar.gz", hash = "sha256:786b636d2628dc91bcddb4f3a6d50f6df59ad1a884540021d86b2ac9c1431101"}, + 
{file = "types_bleach-6.0.0.2-py3-none-any.whl", hash = "sha256:b62140152a74417b409aa67d7ff50ee7fb8d9681fbe9e46c5ff9b6f969f3246f"}, ] [[package]] name = "types-commonmark" -version = "0.9.2" +version = "0.9.2.2" description = "Typing stubs for commonmark" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-commonmark-0.9.2.tar.gz", hash = "sha256:b894b67750c52fd5abc9a40a9ceb9da4652a391d75c1b480bba9cef90f19fc86"}, - {file = "types_commonmark-0.9.2-py3-none-any.whl", hash = "sha256:56f20199a1f9a2924443211a0ef97f8b15a8a956a7f4e9186be6950bf38d6d02"}, + {file = "types-commonmark-0.9.2.2.tar.gz", hash = "sha256:f3259350634c2ce68ae503398430482f7cf44e5cae3d344995e916fbf453b4be"}, + {file = "types_commonmark-0.9.2.2-py3-none-any.whl", hash = "sha256:d3d878692615e7fbe47bf19ba67497837b135812d665012a3d42219c1f2c3a61"}, ] [[package]] -name = "types-cryptography" -version = "3.3.15" -description = "Typing stubs for cryptography" -category = "dev" -optional = false -python-versions = "*" -files = [ - {file = "types-cryptography-3.3.15.tar.gz", hash = "sha256:a7983a75a7b88a18f88832008f0ef140b8d1097888ec1a0824ec8fb7e105273b"}, - {file = "types_cryptography-3.3.15-py3-none-any.whl", hash = "sha256:d9b0dd5465d7898d400850e7f35e5518aa93a7e23d3e11757cd81b4777089046"}, -] - -[package.dependencies] -types-enum34 = "*" -types-ipaddress = "*" - -[[package]] -name = "types-docutils" -version = "0.19.1.1" -description = "Typing stubs for docutils" -category = "dev" -optional = false -python-versions = "*" -files = [ - {file = "types-docutils-0.19.1.1.tar.gz", hash = "sha256:be0a51ba1c7dd215d9d2df66d6845e63c1009b4bbf4c5beb87a0d9745cdba962"}, - {file = "types_docutils-0.19.1.1-py3-none-any.whl", hash = "sha256:a024cada35f0c13cc45eb0b68a102719018a634013690b7fef723bcbfadbd1f1"}, -] - -[[package]] -name = "types-enum34" -version = "1.1.8" -description = "Typing stubs for enum34" -category = "dev" -optional = false -python-versions = "*" -files = [ - {file = "types-enum34-1.1.8.tar.gz", hash = "sha256:6f9c769641d06d73a55e11c14d38ac76fcd37eb545ce79cebb6eec9d50a64110"}, - {file = "types_enum34-1.1.8-py3-none-any.whl", hash = "sha256:05058c7a495f6bfaaca0be4aeac3cce5cdd80a2bad2aab01fd49a20bf4a0209d"}, -] - -[[package]] -name = "types-ipaddress" -version = "1.0.8" -description = "Typing stubs for ipaddress" +name = "types-jsonschema" +version = "4.17.0.7" +description = "Typing stubs for jsonschema" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-ipaddress-1.0.8.tar.gz", hash = "sha256:a03df3be5935e50ba03fa843daabff539a041a28e73e0fce2c5705bee54d3841"}, - {file = "types_ipaddress-1.0.8-py3-none-any.whl", hash = "sha256:4933b74da157ba877b1a705d64f6fa7742745e9ffd65e51011f370c11ebedb55"}, + {file = "types-jsonschema-4.17.0.7.tar.gz", hash = "sha256:130e57c5f1ca755f95775d0822ad7a3907294e1461306af54baf804f317fd54c"}, + {file = "types_jsonschema-4.17.0.7-py3-none-any.whl", hash = "sha256:e129b52be6df841d97a98f087631dd558f7812eb91ff7b733c3301bd2446271b"}, ] [[package]] -name = "types-jsonschema" -version = "4.17.0.2" -description = "Typing stubs for jsonschema" +name = "types-netaddr" +version = "0.8.0.7" +description = "Typing stubs for netaddr" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-jsonschema-4.17.0.2.tar.gz", hash = "sha256:8b9e1140d4d780f0f19b5cab1b8a3732e8dd5e49dbc1f174cc0b499125ca6f6c"}, - {file = "types_jsonschema-4.17.0.2-py3-none-any.whl", hash = 
"sha256:8fd2f9aea4da54f9a811baa6963aac10fd680c18baa6237392c079b97d152738"}, + {file = "types-netaddr-0.8.0.7.tar.gz", hash = "sha256:3362864fa0258782d449b91707f37e55f62290b4f438974a08758b498169e109"}, + {file = "types_netaddr-0.8.0.7-py3-none-any.whl", hash = "sha256:a540cdfb2f858a0509ce5a4e4fcc80ef11b19f10a2473e48d32217af517818c0"}, ] [[package]] name = "types-opentracing" -version = "2.4.10" +version = "2.4.10.4" description = "Typing stubs for opentracing" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-opentracing-2.4.10.tar.gz", hash = "sha256:6101414f3b6d3b9c10f1c510a261e8439b6c8d67c723d5c2872084697b4580a7"}, - {file = "types_opentracing-2.4.10-py3-none-any.whl", hash = "sha256:66d9cfbbdc4a6f8ca8189a15ad26f0fe41cee84c07057759c5d194e2505b84c2"}, + {file = "types-opentracing-2.4.10.4.tar.gz", hash = "sha256:347040c9da4ada7d3c795659912c95d98c5651e242e8eaa0344815fee5bb97e2"}, + {file = "types_opentracing-2.4.10.4-py3-none-any.whl", hash = "sha256:73c9b958eea3df6c4906ebf3865608a562dd9981c1bbc75a373a583c613bed56"}, ] [[package]] name = "types-pillow" -version = "9.4.0.0" +version = "9.4.0.19" description = "Typing stubs for Pillow" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-Pillow-9.4.0.0.tar.gz", hash = "sha256:ef8a823638ceb765a144a98a2f816b8912da0337c5c2556d33774f1434f9918c"}, - {file = "types_Pillow-9.4.0.0-py3-none-any.whl", hash = "sha256:246f0dc52d575ef64e01f06f41be37a492b542ee3180638a7b874a6dd4d48c01"}, + {file = "types-Pillow-9.4.0.19.tar.gz", hash = "sha256:a04401181979049977e318dae4523ab5ae8246314fc68fcf50b043ac885a5468"}, + {file = "types_Pillow-9.4.0.19-py3-none-any.whl", hash = "sha256:b55f2508be21e68a39f0a41830f1f1725aba0888e727e2eccd253c78cd5357a5"}, ] [[package]] name = "types-psycopg2" -version = "2.9.21.2" +version = "2.9.21.9" description = "Typing stubs for psycopg2" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-psycopg2-2.9.21.2.tar.gz", hash = "sha256:bff045579642ce00b4a3c8f2e401b7f96dfaa34939f10be64b0dd3b53feca57d"}, - {file = "types_psycopg2-2.9.21.2-py3-none-any.whl", hash = "sha256:084558d6bc4b2cfa249b06be0fdd9a14a69d307bae5bb5809a2f14cfbaa7a23f"}, + {file = "types-psycopg2-2.9.21.9.tar.gz", hash = "sha256:388dc36a04551632289c4aaf1fc5b91e147654b165db896d094844e216f22bf5"}, + {file = "types_psycopg2-2.9.21.9-py3-none-any.whl", hash = "sha256:0332525fb9d3031d3da46f091e7d40b2c4d4958e9c00d2b4c1eaaa9f8ef9de4e"}, ] [[package]] name = "types-pyopenssl" -version = "22.1.0.2" +version = "23.1.0.1" description = "Typing stubs for pyOpenSSL" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-pyOpenSSL-22.1.0.2.tar.gz", hash = "sha256:7a350e29e55bc3ee4571f996b4b1c18c4e4098947db45f7485b016eaa35b44bc"}, - {file = "types_pyOpenSSL-22.1.0.2-py3-none-any.whl", hash = "sha256:54606a6afb203eb261e0fca9b7f75fa6c24d5ff71e13903c162ffb951c2c64c6"}, + {file = "types-pyOpenSSL-23.1.0.1.tar.gz", hash = "sha256:59044283c475eaa5a29b36a903c123d52bdf4a7e012f0a1ca0e41115b99216da"}, + {file = "types_pyOpenSSL-23.1.0.1-py3-none-any.whl", hash = "sha256:ac7fbc240930c2f9a1cbd2d04f9cb14ad0f15b0ad8d6528732a83747b1b2086e"}, ] [package.dependencies] -types-cryptography = "*" +cryptography = ">=35.0.0" [[package]] name = "types-pyyaml" -version = "6.0.12.2" +version = "6.0.12.9" description = "Typing stubs for PyYAML" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-PyYAML-6.0.12.2.tar.gz", hash = 
"sha256:6840819871c92deebe6a2067fb800c11b8a063632eb4e3e755914e7ab3604e83"}, - {file = "types_PyYAML-6.0.12.2-py3-none-any.whl", hash = "sha256:1e94e80aafee07a7e798addb2a320e32956a373f376655128ae20637adb2655b"}, + {file = "types-PyYAML-6.0.12.9.tar.gz", hash = "sha256:c51b1bd6d99ddf0aa2884a7a328810ebf70a4262c292195d3f4f9a0005f9eeb6"}, + {file = "types_PyYAML-6.0.12.9-py3-none-any.whl", hash = "sha256:5aed5aa66bd2d2e158f75dda22b059570ede988559f030cf294871d3b647e3e8"}, ] [[package]] name = "types-requests" -version = "2.28.11.7" +version = "2.28.11.17" description = "Typing stubs for requests" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-requests-2.28.11.7.tar.gz", hash = "sha256:0ae38633734990d019b80f5463dfa164ebd3581998ac8435f526da6fe4d598c3"}, - {file = "types_requests-2.28.11.7-py3-none-any.whl", hash = "sha256:b6a2fca8109f4fdba33052f11ed86102bddb2338519e1827387137fefc66a98b"}, + {file = "types-requests-2.28.11.17.tar.gz", hash = "sha256:0d580652ce903f643f8c3b494dd01d29367ea57cea0c7ad7f65cf3169092edb0"}, + {file = "types_requests-2.28.11.17-py3-none-any.whl", hash = "sha256:cc1aba862575019306b2ed134eb1ea994cab1c887a22e18d3383e6dd42e9789b"}, ] [package.dependencies] @@ -2739,41 +2916,38 @@ types-urllib3 = "<1.27" [[package]] name = "types-setuptools" -version = "65.6.0.3" +version = "67.6.0.7" description = "Typing stubs for setuptools" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-setuptools-65.6.0.3.tar.gz", hash = "sha256:7ddd7415282fa97ab18e490206067c0cdb126b103743e72ee86783d7af6481c5"}, - {file = "types_setuptools-65.6.0.3-py3-none-any.whl", hash = "sha256:ad729fc3a9a3946f73915eaab16ce56b30ed5ae998479253d809d76b3889ee09"}, + {file = "types-setuptools-67.6.0.7.tar.gz", hash = "sha256:f46b11773b1aeddbd2ef32fd6a6091ef33aa9b32daa124f6ce63f616de59ae51"}, + {file = "types_setuptools-67.6.0.7-py3-none-any.whl", hash = "sha256:ea2873dc8dd9e8421929dc50617ac7c2054c9a873942c5b5b606e2effef5db12"}, ] -[package.dependencies] -types-docutils = "*" - [[package]] name = "types-urllib3" -version = "1.26.10" +version = "1.26.25.10" description = "Typing stubs for urllib3" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-urllib3-1.26.10.tar.gz", hash = "sha256:a26898f530e6c3f43f25b907f2b884486868ffd56a9faa94cbf9b3eb6e165d6a"}, - {file = "types_urllib3-1.26.10-py3-none-any.whl", hash = "sha256:d755278d5ecd7a7a6479a190e54230f241f1a99c19b81518b756b19dc69e518c"}, + {file = "types-urllib3-1.26.25.10.tar.gz", hash = "sha256:c44881cde9fc8256d05ad6b21f50c4681eb20092552351570ab0a8a0653286d6"}, + {file = "types_urllib3-1.26.25.10-py3-none-any.whl", hash = "sha256:12c744609d588340a07e45d333bf870069fc8793bcf96bae7a96d4712a42591d"}, ] [[package]] name = "typing-extensions" -version = "4.4.0" +version = "4.5.0" description = "Backported and Experimental Type Hints for Python 3.7+" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "typing_extensions-4.4.0-py3-none-any.whl", hash = "sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e"}, - {file = "typing_extensions-4.4.0.tar.gz", hash = "sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa"}, + {file = "typing_extensions-4.5.0-py3-none-any.whl", hash = "sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4"}, + {file = "typing_extensions-4.5.0.tar.gz", hash = "sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb"}, ] [[package]] @@ -2790,14 +2964,14 @@ files 
= [ [[package]] name = "urllib3" -version = "1.26.12" +version = "1.26.15" description = "HTTP library with thread-safe connection pooling, file post, and more." category = "main" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4" +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" files = [ - {file = "urllib3-1.26.12-py2.py3-none-any.whl", hash = "sha256:b930dd878d5a8afb066a637fbb35144fe7901e3b209d1cd4f524bd0e9deee997"}, - {file = "urllib3-1.26.12.tar.gz", hash = "sha256:3fa96cf423e6987997fc326ae8df396db2a8b7c667747d47ddd8ecba91f4a74e"}, + {file = "urllib3-1.26.15-py2.py3-none-any.whl", hash = "sha256:aa751d169e23c7479ce47a0cb0da579e3ede798f994f5816a74e4f4500dcea42"}, + {file = "urllib3-1.26.15.tar.gz", hash = "sha256:8a388717b9476f934a21484e8c8e61875ab60644d29b9b39e11e4b9dc1c6b305"}, ] [package.extras] @@ -2819,124 +2993,135 @@ files = [ [[package]] name = "wrapt" -version = "1.14.1" +version = "1.15.0" description = "Module for decorators, wrappers and monkey patching." category = "dev" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" files = [ - {file = "wrapt-1.14.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:1b376b3f4896e7930f1f772ac4b064ac12598d1c38d04907e696cc4d794b43d3"}, - {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:903500616422a40a98a5a3c4ff4ed9d0066f3b4c951fa286018ecdf0750194ef"}, - {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5a9a0d155deafd9448baff28c08e150d9b24ff010e899311ddd63c45c2445e28"}, - {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ddaea91abf8b0d13443f6dac52e89051a5063c7d014710dcb4d4abb2ff811a59"}, - {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:36f582d0c6bc99d5f39cd3ac2a9062e57f3cf606ade29a0a0d6b323462f4dd87"}, - {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:7ef58fb89674095bfc57c4069e95d7a31cfdc0939e2a579882ac7d55aadfd2a1"}, - {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:e2f83e18fe2f4c9e7db597e988f72712c0c3676d337d8b101f6758107c42425b"}, - {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ee2b1b1769f6707a8a445162ea16dddf74285c3964f605877a20e38545c3c462"}, - {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:833b58d5d0b7e5b9832869f039203389ac7cbf01765639c7309fd50ef619e0b1"}, - {file = "wrapt-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:80bb5c256f1415f747011dc3604b59bc1f91c6e7150bd7db03b19170ee06b320"}, - {file = "wrapt-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:07f7a7d0f388028b2df1d916e94bbb40624c59b48ecc6cbc232546706fac74c2"}, - {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02b41b633c6261feff8ddd8d11c711df6842aba629fdd3da10249a53211a72c4"}, - {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fe803deacd09a233e4762a1adcea5db5d31e6be577a43352936179d14d90069"}, - {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:257fd78c513e0fb5cdbe058c27a0624c9884e735bbd131935fd49e9fe719d310"}, - {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4fcc4649dc762cddacd193e6b55bc02edca674067f5f98166d7713b193932b7f"}, - {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_i686.whl", hash = 
"sha256:11871514607b15cfeb87c547a49bca19fde402f32e2b1c24a632506c0a756656"}, - {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8ad85f7f4e20964db4daadcab70b47ab05c7c1cf2a7c1e51087bfaa83831854c"}, - {file = "wrapt-1.14.1-cp310-cp310-win32.whl", hash = "sha256:a9a52172be0b5aae932bef82a79ec0a0ce87288c7d132946d645eba03f0ad8a8"}, - {file = "wrapt-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:6d323e1554b3d22cfc03cd3243b5bb815a51f5249fdcbb86fda4bf62bab9e164"}, - {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:43ca3bbbe97af00f49efb06e352eae40434ca9d915906f77def219b88e85d907"}, - {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6b1a564e6cb69922c7fe3a678b9f9a3c54e72b469875aa8018f18b4d1dd1adf3"}, - {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3"}, - {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:a85d2b46be66a71bedde836d9e41859879cc54a2a04fad1191eb50c2066f6e9d"}, - {file = "wrapt-1.14.1-cp35-cp35m-win32.whl", hash = "sha256:dbcda74c67263139358f4d188ae5faae95c30929281bc6866d00573783c422b7"}, - {file = "wrapt-1.14.1-cp35-cp35m-win_amd64.whl", hash = "sha256:b21bb4c09ffabfa0e85e3a6b623e19b80e7acd709b9f91452b8297ace2a8ab00"}, - {file = "wrapt-1.14.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9e0fd32e0148dd5dea6af5fee42beb949098564cc23211a88d799e434255a1f4"}, - {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9736af4641846491aedb3c3f56b9bc5568d92b0692303b5a305301a95dfd38b1"}, - {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b02d65b9ccf0ef6c34cba6cf5bf2aab1bb2f49c6090bafeecc9cd81ad4ea1c1"}, - {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21ac0156c4b089b330b7666db40feee30a5d52634cc4560e1905d6529a3897ff"}, - {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:9f3e6f9e05148ff90002b884fbc2a86bd303ae847e472f44ecc06c2cd2fcdb2d"}, - {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:6e743de5e9c3d1b7185870f480587b75b1cb604832e380d64f9504a0535912d1"}, - {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d79d7d5dc8a32b7093e81e97dad755127ff77bcc899e845f41bf71747af0c569"}, - {file = "wrapt-1.14.1-cp36-cp36m-win32.whl", hash = "sha256:81b19725065dcb43df02b37e03278c011a09e49757287dca60c5aecdd5a0b8ed"}, - {file = "wrapt-1.14.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b014c23646a467558be7da3d6b9fa409b2c567d2110599b7cf9a0c5992b3b471"}, - {file = "wrapt-1.14.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:88bd7b6bd70a5b6803c1abf6bca012f7ed963e58c68d76ee20b9d751c74a3248"}, - {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5901a312f4d14c59918c221323068fad0540e34324925c8475263841dbdfe68"}, - {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d77c85fedff92cf788face9bfa3ebaa364448ebb1d765302e9af11bf449ca36d"}, - {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d649d616e5c6a678b26d15ece345354f7c2286acd6db868e65fcc5ff7c24a77"}, - {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = 
"sha256:7d2872609603cb35ca513d7404a94d6d608fc13211563571117046c9d2bcc3d7"}, - {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:ee6acae74a2b91865910eef5e7de37dc6895ad96fa23603d1d27ea69df545015"}, - {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2b39d38039a1fdad98c87279b48bc5dce2c0ca0d73483b12cb72aa9609278e8a"}, - {file = "wrapt-1.14.1-cp37-cp37m-win32.whl", hash = "sha256:60db23fa423575eeb65ea430cee741acb7c26a1365d103f7b0f6ec412b893853"}, - {file = "wrapt-1.14.1-cp37-cp37m-win_amd64.whl", hash = "sha256:709fe01086a55cf79d20f741f39325018f4df051ef39fe921b1ebe780a66184c"}, - {file = "wrapt-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8c0ce1e99116d5ab21355d8ebe53d9460366704ea38ae4d9f6933188f327b456"}, - {file = "wrapt-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e3fb1677c720409d5f671e39bac6c9e0e422584e5f518bfd50aa4cbbea02433f"}, - {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:642c2e7a804fcf18c222e1060df25fc210b9c58db7c91416fb055897fc27e8cc"}, - {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b7c050ae976e286906dd3f26009e117eb000fb2cf3533398c5ad9ccc86867b1"}, - {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef3f72c9666bba2bab70d2a8b79f2c6d2c1a42a7f7e2b0ec83bb2f9e383950af"}, - {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:01c205616a89d09827986bc4e859bcabd64f5a0662a7fe95e0d359424e0e071b"}, - {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5a0f54ce2c092aaf439813735584b9537cad479575a09892b8352fea5e988dc0"}, - {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2cf71233a0ed05ccdabe209c606fe0bac7379fdcf687f39b944420d2a09fdb57"}, - {file = "wrapt-1.14.1-cp38-cp38-win32.whl", hash = "sha256:aa31fdcc33fef9eb2552cbcbfee7773d5a6792c137b359e82879c101e98584c5"}, - {file = "wrapt-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:d1967f46ea8f2db647c786e78d8cc7e4313dbd1b0aca360592d8027b8508e24d"}, - {file = "wrapt-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3232822c7d98d23895ccc443bbdf57c7412c5a65996c30442ebe6ed3df335383"}, - {file = "wrapt-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:988635d122aaf2bdcef9e795435662bcd65b02f4f4c1ae37fbee7401c440b3a7"}, - {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cca3c2cdadb362116235fdbd411735de4328c61425b0aa9f872fd76d02c4e86"}, - {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d52a25136894c63de15a35bc0bdc5adb4b0e173b9c0d07a2be9d3ca64a332735"}, - {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40e7bc81c9e2b2734ea4bc1aceb8a8f0ceaac7c5299bc5d69e37c44d9081d43b"}, - {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b9b7a708dd92306328117d8c4b62e2194d00c365f18eff11a9b53c6f923b01e3"}, - {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6a9a25751acb379b466ff6be78a315e2b439d4c94c1e99cb7266d40a537995d3"}, - {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:34aa51c45f28ba7f12accd624225e2b1e5a3a45206aa191f6f9aac931d9d56fe"}, - {file = "wrapt-1.14.1-cp39-cp39-win32.whl", hash = 
"sha256:dee0ce50c6a2dd9056c20db781e9c1cfd33e77d2d569f5d1d9321c641bb903d5"}, - {file = "wrapt-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:dee60e1de1898bde3b238f18340eec6148986da0455d8ba7848d50470a7a32fb"}, - {file = "wrapt-1.14.1.tar.gz", hash = "sha256:380a85cf89e0e69b7cfbe2ea9f765f004ff419f34194018a6827ac0e3edfed4d"}, + {file = "wrapt-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ca1cccf838cd28d5a0883b342474c630ac48cac5df0ee6eacc9c7290f76b11c1"}, + {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:e826aadda3cae59295b95343db8f3d965fb31059da7de01ee8d1c40a60398b29"}, + {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5fc8e02f5984a55d2c653f5fea93531e9836abbd84342c1d1e17abc4a15084c2"}, + {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:96e25c8603a155559231c19c0349245eeb4ac0096fe3c1d0be5c47e075bd4f46"}, + {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:40737a081d7497efea35ab9304b829b857f21558acfc7b3272f908d33b0d9d4c"}, + {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:f87ec75864c37c4c6cb908d282e1969e79763e0d9becdfe9fe5473b7bb1e5f09"}, + {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:1286eb30261894e4c70d124d44b7fd07825340869945c79d05bda53a40caa079"}, + {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:493d389a2b63c88ad56cdc35d0fa5752daac56ca755805b1b0c530f785767d5e"}, + {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:58d7a75d731e8c63614222bcb21dd992b4ab01a399f1f09dd82af17bbfc2368a"}, + {file = "wrapt-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:21f6d9a0d5b3a207cdf7acf8e58d7d13d463e639f0c7e01d82cdb671e6cb7923"}, + {file = "wrapt-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ce42618f67741d4697684e501ef02f29e758a123aa2d669e2d964ff734ee00ee"}, + {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41d07d029dd4157ae27beab04d22b8e261eddfc6ecd64ff7000b10dc8b3a5727"}, + {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54accd4b8bc202966bafafd16e69da9d5640ff92389d33d28555c5fd4f25ccb7"}, + {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fbfbca668dd15b744418265a9607baa970c347eefd0db6a518aaf0cfbd153c0"}, + {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:76e9c727a874b4856d11a32fb0b389afc61ce8aaf281ada613713ddeadd1cfec"}, + {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e20076a211cd6f9b44a6be58f7eeafa7ab5720eb796975d0c03f05b47d89eb90"}, + {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a74d56552ddbde46c246b5b89199cb3fd182f9c346c784e1a93e4dc3f5ec9975"}, + {file = "wrapt-1.15.0-cp310-cp310-win32.whl", hash = "sha256:26458da5653aa5b3d8dc8b24192f574a58984c749401f98fff994d41d3f08da1"}, + {file = "wrapt-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:75760a47c06b5974aa5e01949bf7e66d2af4d08cb8c1d6516af5e39595397f5e"}, + {file = "wrapt-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ba1711cda2d30634a7e452fc79eabcadaffedf241ff206db2ee93dd2c89a60e7"}, + {file = "wrapt-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56374914b132c702aa9aa9959c550004b8847148f95e1b824772d453ac204a72"}, + {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:a89ce3fd220ff144bd9d54da333ec0de0399b52c9ac3d2ce34b569cf1a5748fb"}, + {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3bbe623731d03b186b3d6b0d6f51865bf598587c38d6f7b0be2e27414f7f214e"}, + {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3abbe948c3cbde2689370a262a8d04e32ec2dd4f27103669a45c6929bcdbfe7c"}, + {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b67b819628e3b748fd3c2192c15fb951f549d0f47c0449af0764d7647302fda3"}, + {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7eebcdbe3677e58dd4c0e03b4f2cfa346ed4049687d839adad68cc38bb559c92"}, + {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:74934ebd71950e3db69960a7da29204f89624dde411afbfb3b4858c1409b1e98"}, + {file = "wrapt-1.15.0-cp311-cp311-win32.whl", hash = "sha256:bd84395aab8e4d36263cd1b9308cd504f6cf713b7d6d3ce25ea55670baec5416"}, + {file = "wrapt-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:a487f72a25904e2b4bbc0817ce7a8de94363bd7e79890510174da9d901c38705"}, + {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:4ff0d20f2e670800d3ed2b220d40984162089a6e2c9646fdb09b85e6f9a8fc29"}, + {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9ed6aa0726b9b60911f4aed8ec5b8dd7bf3491476015819f56473ffaef8959bd"}, + {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:896689fddba4f23ef7c718279e42f8834041a21342d95e56922e1c10c0cc7afb"}, + {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:75669d77bb2c071333417617a235324a1618dba66f82a750362eccbe5b61d248"}, + {file = "wrapt-1.15.0-cp35-cp35m-win32.whl", hash = "sha256:fbec11614dba0424ca72f4e8ba3c420dba07b4a7c206c8c8e4e73f2e98f4c559"}, + {file = "wrapt-1.15.0-cp35-cp35m-win_amd64.whl", hash = "sha256:fd69666217b62fa5d7c6aa88e507493a34dec4fa20c5bd925e4bc12fce586639"}, + {file = "wrapt-1.15.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b0724f05c396b0a4c36a3226c31648385deb6a65d8992644c12a4963c70326ba"}, + {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbeccb1aa40ab88cd29e6c7d8585582c99548f55f9b2581dfc5ba68c59a85752"}, + {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38adf7198f8f154502883242f9fe7333ab05a5b02de7d83aa2d88ea621f13364"}, + {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:578383d740457fa790fdf85e6d346fda1416a40549fe8db08e5e9bd281c6a475"}, + {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:a4cbb9ff5795cd66f0066bdf5947f170f5d63a9274f99bdbca02fd973adcf2a8"}, + {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:af5bd9ccb188f6a5fdda9f1f09d9f4c86cc8a539bd48a0bfdc97723970348418"}, + {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b56d5519e470d3f2fe4aa7585f0632b060d532d0696c5bdfb5e8319e1d0f69a2"}, + {file = "wrapt-1.15.0-cp36-cp36m-win32.whl", hash = "sha256:77d4c1b881076c3ba173484dfa53d3582c1c8ff1f914c6461ab70c8428b796c1"}, + {file = "wrapt-1.15.0-cp36-cp36m-win_amd64.whl", hash = "sha256:077ff0d1f9d9e4ce6476c1a924a3332452c1406e59d90a2cf24aeb29eeac9420"}, + {file = "wrapt-1.15.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:5c5aa28df055697d7c37d2099a7bc09f559d5053c3349b1ad0c39000e611d317"}, + {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a8564f283394634a7a7054b7983e47dbf39c07712d7b177b37e03f2467a024e"}, + {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780c82a41dc493b62fc5884fb1d3a3b81106642c5c5c78d6a0d4cbe96d62ba7e"}, + {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e169e957c33576f47e21864cf3fc9ff47c223a4ebca8960079b8bd36cb014fd0"}, + {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b02f21c1e2074943312d03d243ac4388319f2456576b2c6023041c4d57cd7019"}, + {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f2e69b3ed24544b0d3dbe2c5c0ba5153ce50dcebb576fdc4696d52aa22db6034"}, + {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d787272ed958a05b2c86311d3a4135d3c2aeea4fc655705f074130aa57d71653"}, + {file = "wrapt-1.15.0-cp37-cp37m-win32.whl", hash = "sha256:02fce1852f755f44f95af51f69d22e45080102e9d00258053b79367d07af39c0"}, + {file = "wrapt-1.15.0-cp37-cp37m-win_amd64.whl", hash = "sha256:abd52a09d03adf9c763d706df707c343293d5d106aea53483e0ec8d9e310ad5e"}, + {file = "wrapt-1.15.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cdb4f085756c96a3af04e6eca7f08b1345e94b53af8921b25c72f096e704e145"}, + {file = "wrapt-1.15.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:230ae493696a371f1dbffaad3dafbb742a4d27a0afd2b1aecebe52b740167e7f"}, + {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63424c681923b9f3bfbc5e3205aafe790904053d42ddcc08542181a30a7a51bd"}, + {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6bcbfc99f55655c3d93feb7ef3800bd5bbe963a755687cbf1f490a71fb7794b"}, + {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c99f4309f5145b93eca6e35ac1a988f0dc0a7ccf9ccdcd78d3c0adf57224e62f"}, + {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b130fe77361d6771ecf5a219d8e0817d61b236b7d8b37cc045172e574ed219e6"}, + {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:96177eb5645b1c6985f5c11d03fc2dbda9ad24ec0f3a46dcce91445747e15094"}, + {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5fe3e099cf07d0fb5a1e23d399e5d4d1ca3e6dfcbe5c8570ccff3e9208274f7"}, + {file = "wrapt-1.15.0-cp38-cp38-win32.whl", hash = "sha256:abd8f36c99512755b8456047b7be10372fca271bf1467a1caa88db991e7c421b"}, + {file = "wrapt-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:b06fa97478a5f478fb05e1980980a7cdf2712015493b44d0c87606c1513ed5b1"}, + {file = "wrapt-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2e51de54d4fb8fb50d6ee8327f9828306a959ae394d3e01a1ba8b2f937747d86"}, + {file = "wrapt-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0970ddb69bba00670e58955f8019bec4a42d1785db3faa043c33d81de2bf843c"}, + {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76407ab327158c510f44ded207e2f76b657303e17cb7a572ffe2f5a8a48aa04d"}, + {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd525e0e52a5ff16653a3fc9e3dd827981917d34996600bbc34c05d048ca35cc"}, + {file = 
"wrapt-1.15.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d37ac69edc5614b90516807de32d08cb8e7b12260a285ee330955604ed9dd29"}, + {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:078e2a1a86544e644a68422f881c48b84fef6d18f8c7a957ffd3f2e0a74a0d4a"}, + {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2cf56d0e237280baed46f0b5316661da892565ff58309d4d2ed7dba763d984b8"}, + {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7dc0713bf81287a00516ef43137273b23ee414fe41a3c14be10dd95ed98a2df9"}, + {file = "wrapt-1.15.0-cp39-cp39-win32.whl", hash = "sha256:46ed616d5fb42f98630ed70c3529541408166c22cdfd4540b88d5f21006b0eff"}, + {file = "wrapt-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:eef4d64c650f33347c1f9266fa5ae001440b232ad9b98f1f43dfe7a79435c0a6"}, + {file = "wrapt-1.15.0-py3-none-any.whl", hash = "sha256:64b1df0f83706b4ef4cfb4fb0e4c2669100fd7ecacfb59e091fad300d4e04640"}, + {file = "wrapt-1.15.0.tar.gz", hash = "sha256:d06730c6aed78cee4126234cf2d071e01b44b915e725a6cb439a879ec9754a3a"}, ] [[package]] name = "xmlschema" -version = "1.10.0" +version = "2.2.2" description = "An XML Schema validator and decoder" category = "main" optional = true python-versions = ">=3.7" files = [ - {file = "xmlschema-1.10.0-py3-none-any.whl", hash = "sha256:dbd68bded2fef00c19cf37110ca0565eca34cf0b6c9e1d3b62ad0de8cbb582ca"}, - {file = "xmlschema-1.10.0.tar.gz", hash = "sha256:be1eedce6a4b911fd3a7f4060d0811951820a13410e61f0454b30e9f4e7cf197"}, + {file = "xmlschema-2.2.2-py3-none-any.whl", hash = "sha256:557f3632b54b6ff10576736bba62e43db84eb60f6465a83818576cd9ffcc1799"}, + {file = "xmlschema-2.2.2.tar.gz", hash = "sha256:0caa96668807b4b51c42a0fe2b6610752bc59f069615df3e34dcfffb962973fd"}, ] [package.dependencies] -elementpath = ">=2.5.0,<3.0.0" +elementpath = ">=4.0.0,<5.0.0" [package.extras] -codegen = ["elementpath (>=2.5.0,<3.0.0)", "jinja2"] -dev = ["Sphinx", "coverage", "elementpath (>=2.5.0,<3.0.0)", "flake8", "jinja2", "lxml", "lxml-stubs", "memory-profiler", "mypy", "sphinx-rtd-theme", "tox"] -docs = ["Sphinx", "elementpath (>=2.5.0,<3.0.0)", "jinja2", "sphinx-rtd-theme"] +codegen = ["elementpath (>=4.0.0,<5.0.0)", "jinja2"] +dev = ["Sphinx", "coverage", "elementpath (>=4.0.0,<5.0.0)", "flake8", "jinja2", "lxml", "lxml-stubs", "memory-profiler", "mypy", "sphinx-rtd-theme", "tox"] +docs = ["Sphinx", "elementpath (>=4.0.0,<5.0.0)", "jinja2", "sphinx-rtd-theme"] [[package]] name = "zipp" -version = "3.7.0" +version = "3.15.0" description = "Backport of pathlib-compatible object wrapper for zip files" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "zipp-3.7.0-py3-none-any.whl", hash = "sha256:b47250dd24f92b7dd6a0a8fc5244da14608f3ca90a5efcd37a3b1642fac9a375"}, - {file = "zipp-3.7.0.tar.gz", hash = "sha256:9f50f446828eb9d45b267433fd3e9da8d801f614129124863f9c51ebceafb87d"}, + {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"}, + {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"}, ] [package.extras] -docs = ["jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "sphinx"] -testing = ["func-timeout", "jaraco.itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", 
"rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] [[package]] -name = "zope.event" -version = "4.5.0" +name = "zope-event" +version = "4.6" description = "Very basic event publishing system" category = "dev" optional = false python-versions = "*" files = [ - {file = "zope.event-4.5.0-py2.py3-none-any.whl", hash = "sha256:2666401939cdaa5f4e0c08cf7f20c9b21423b95e88f4675b1443973bdb080c42"}, - {file = "zope.event-4.5.0.tar.gz", hash = "sha256:5e76517f5b9b119acf37ca8819781db6c16ea433f7e2062c4afc2b6fbedb1330"}, + {file = "zope.event-4.6-py2.py3-none-any.whl", hash = "sha256:73d9e3ef750cca14816a9c322c7250b0d7c9dbc337df5d1b807ff8d3d0b9e97c"}, + {file = "zope.event-4.6.tar.gz", hash = "sha256:81d98813046fc86cc4136e3698fee628a3282f9c320db18658c21749235fce80"}, ] [package.dependencies] @@ -2947,64 +3132,43 @@ docs = ["Sphinx"] test = ["zope.testrunner"] [[package]] -name = "zope.interface" -version = "5.4.0" +name = "zope-interface" +version = "6.0" description = "Interfaces for Python" category = "main" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=3.7" files = [ - {file = "zope.interface-5.4.0-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:7df1e1c05304f26faa49fa752a8c690126cf98b40b91d54e6e9cc3b7d6ffe8b7"}, - {file = "zope.interface-5.4.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:2c98384b254b37ce50eddd55db8d381a5c53b4c10ee66e1e7fe749824f894021"}, - {file = "zope.interface-5.4.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:08f9636e99a9d5410181ba0729e0408d3d8748026ea938f3b970a0249daa8192"}, - {file = "zope.interface-5.4.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:0ea1d73b7c9dcbc5080bb8aaffb776f1c68e807767069b9ccdd06f27a161914a"}, - {file = "zope.interface-5.4.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:273f158fabc5ea33cbc936da0ab3d4ba80ede5351babc4f577d768e057651531"}, - {file = "zope.interface-5.4.0-cp27-cp27m-win32.whl", hash = "sha256:a1e6e96217a0f72e2b8629e271e1b280c6fa3fe6e59fa8f6701bec14e3354325"}, - {file = "zope.interface-5.4.0-cp27-cp27m-win_amd64.whl", hash = "sha256:877473e675fdcc113c138813a5dd440da0769a2d81f4d86614e5d62b69497155"}, - {file = "zope.interface-5.4.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:f7ee479e96f7ee350db1cf24afa5685a5899e2b34992fb99e1f7c1b0b758d263"}, - {file = "zope.interface-5.4.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:b0297b1e05fd128d26cc2460c810d42e205d16d76799526dfa8c8ccd50e74959"}, - {file = "zope.interface-5.4.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:af310ec8335016b5e52cae60cda4a4f2a60a788cbb949a4fbea13d441aa5a09e"}, - {file = "zope.interface-5.4.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:9a9845c4c6bb56e508651f005c4aeb0404e518c6f000d5a1123ab077ab769f5c"}, - {file = "zope.interface-5.4.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:0b465ae0962d49c68aa9733ba92a001b2a0933c317780435f00be7ecb959c702"}, - {file = "zope.interface-5.4.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:5dd9ca406499444f4c8299f803d4a14edf7890ecc595c8b1c7115c2342cadc5f"}, - {file = "zope.interface-5.4.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:469e2407e0fe9880ac690a3666f03eb4c3c444411a5a5fddfdabc5d184a79f05"}, - {file = "zope.interface-5.4.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = 
"sha256:52de7fc6c21b419078008f697fd4103dbc763288b1406b4562554bd47514c004"}, - {file = "zope.interface-5.4.0-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:3dd4952748521205697bc2802e4afac5ed4b02909bb799ba1fe239f77fd4e117"}, - {file = "zope.interface-5.4.0-cp35-cp35m-win32.whl", hash = "sha256:dd93ea5c0c7f3e25335ab7d22a507b1dc43976e1345508f845efc573d3d779d8"}, - {file = "zope.interface-5.4.0-cp35-cp35m-win_amd64.whl", hash = "sha256:3748fac0d0f6a304e674955ab1365d515993b3a0a865e16a11ec9d86fb307f63"}, - {file = "zope.interface-5.4.0-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:66c0061c91b3b9cf542131148ef7ecbecb2690d48d1612ec386de9d36766058f"}, - {file = "zope.interface-5.4.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:d0c1bc2fa9a7285719e5678584f6b92572a5b639d0e471bb8d4b650a1a910920"}, - {file = "zope.interface-5.4.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:2876246527c91e101184f63ccd1d716ec9c46519cc5f3d5375a3351c46467c46"}, - {file = "zope.interface-5.4.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:334701327f37c47fa628fc8b8d28c7d7730ce7daaf4bda1efb741679c2b087fc"}, - {file = "zope.interface-5.4.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:71aace0c42d53abe6fc7f726c5d3b60d90f3c5c055a447950ad6ea9cec2e37d9"}, - {file = "zope.interface-5.4.0-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:5bb3489b4558e49ad2c5118137cfeaf59434f9737fa9c5deefc72d22c23822e2"}, - {file = "zope.interface-5.4.0-cp36-cp36m-win32.whl", hash = "sha256:1c0e316c9add0db48a5b703833881351444398b04111188069a26a61cfb4df78"}, - {file = "zope.interface-5.4.0-cp36-cp36m-win_amd64.whl", hash = "sha256:6f0c02cbb9691b7c91d5009108f975f8ffeab5dff8f26d62e21c493060eff2a1"}, - {file = "zope.interface-5.4.0-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:7d97a4306898b05404a0dcdc32d9709b7d8832c0c542b861d9a826301719794e"}, - {file = "zope.interface-5.4.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:867a5ad16892bf20e6c4ea2aab1971f45645ff3102ad29bd84c86027fa99997b"}, - {file = "zope.interface-5.4.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:5f931a1c21dfa7a9c573ec1f50a31135ccce84e32507c54e1ea404894c5eb96f"}, - {file = "zope.interface-5.4.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:194d0bcb1374ac3e1e023961610dc8f2c78a0f5f634d0c737691e215569e640d"}, - {file = "zope.interface-5.4.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:8270252effc60b9642b423189a2fe90eb6b59e87cbee54549db3f5562ff8d1b8"}, - {file = "zope.interface-5.4.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:15e7d1f7a6ee16572e21e3576d2012b2778cbacf75eb4b7400be37455f5ca8bf"}, - {file = "zope.interface-5.4.0-cp37-cp37m-win32.whl", hash = "sha256:8892f89999ffd992208754851e5a052f6b5db70a1e3f7d54b17c5211e37a98c7"}, - {file = "zope.interface-5.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:2e5a26f16503be6c826abca904e45f1a44ff275fdb7e9d1b75c10671c26f8b94"}, - {file = "zope.interface-5.4.0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:0f91b5b948686659a8e28b728ff5e74b1be6bf40cb04704453617e5f1e945ef3"}, - {file = "zope.interface-5.4.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:4de4bc9b6d35c5af65b454d3e9bc98c50eb3960d5a3762c9438df57427134b8e"}, - {file = "zope.interface-5.4.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:bf68f4b2b6683e52bec69273562df15af352e5ed25d1b6641e7efddc5951d1a7"}, - {file = "zope.interface-5.4.0-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:63b82bb63de7c821428d513607e84c6d97d58afd1fe2eb645030bdc185440120"}, - {file = "zope.interface-5.4.0-cp38-cp38-manylinux2010_x86_64.whl", 
hash = "sha256:db1fa631737dab9fa0b37f3979d8d2631e348c3b4e8325d6873c2541d0ae5a48"}, - {file = "zope.interface-5.4.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:f44e517131a98f7a76696a7b21b164bcb85291cee106a23beccce454e1f433a4"}, - {file = "zope.interface-5.4.0-cp38-cp38-win32.whl", hash = "sha256:a9506a7e80bcf6eacfff7f804c0ad5350c8c95b9010e4356a4b36f5322f09abb"}, - {file = "zope.interface-5.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:3c02411a3b62668200910090a0dff17c0b25aaa36145082a5a6adf08fa281e54"}, - {file = "zope.interface-5.4.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:0cee5187b60ed26d56eb2960136288ce91bcf61e2a9405660d271d1f122a69a4"}, - {file = "zope.interface-5.4.0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:a8156e6a7f5e2a0ff0c5b21d6bcb45145efece1909efcbbbf48c56f8da68221d"}, - {file = "zope.interface-5.4.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:205e40ccde0f37496904572035deea747390a8b7dc65146d30b96e2dd1359a83"}, - {file = "zope.interface-5.4.0-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:3f24df7124c323fceb53ff6168da70dbfbae1442b4f3da439cd441681f54fe25"}, - {file = "zope.interface-5.4.0-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:5208ebd5152e040640518a77827bdfcc73773a15a33d6644015b763b9c9febc1"}, - {file = "zope.interface-5.4.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:17776ecd3a1fdd2b2cd5373e5ef8b307162f581c693575ec62e7c5399d80794c"}, - {file = "zope.interface-5.4.0-cp39-cp39-win32.whl", hash = "sha256:d4d9d6c1a455d4babd320203b918ccc7fcbefe308615c521062bc2ba1aa4d26e"}, - {file = "zope.interface-5.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:0cba8477e300d64a11a9789ed40ee8932b59f9ee05f85276dbb4b59acee5dd09"}, - {file = "zope.interface-5.4.0.tar.gz", hash = "sha256:5dba5f530fec3f0988d83b78cc591b58c0b6eb8431a85edd1569a0539a8a5a0e"}, + {file = "zope.interface-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f299c020c6679cb389814a3b81200fe55d428012c5e76da7e722491f5d205990"}, + {file = "zope.interface-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ee4b43f35f5dc15e1fec55ccb53c130adb1d11e8ad8263d68b1284b66a04190d"}, + {file = "zope.interface-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a158846d0fca0a908c1afb281ddba88744d403f2550dc34405c3691769cdd85"}, + {file = "zope.interface-6.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f72f23bab1848edb7472309e9898603141644faec9fd57a823ea6b4d1c4c8995"}, + {file = "zope.interface-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48f4d38cf4b462e75fac78b6f11ad47b06b1c568eb59896db5b6ec1094eb467f"}, + {file = "zope.interface-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:87b690bbee9876163210fd3f500ee59f5803e4a6607d1b1238833b8885ebd410"}, + {file = "zope.interface-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f2363e5fd81afb650085c6686f2ee3706975c54f331b426800b53531191fdf28"}, + {file = "zope.interface-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:af169ba897692e9cd984a81cb0f02e46dacdc07d6cf9fd5c91e81f8efaf93d52"}, + {file = "zope.interface-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa90bac61c9dc3e1a563e5babb3fd2c0c1c80567e815442ddbe561eadc803b30"}, + {file = "zope.interface-6.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:89086c9d3490a0f265a3c4b794037a84541ff5ffa28bb9c24cc9f66566968464"}, + {file = 
"zope.interface-6.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:809fe3bf1a91393abc7e92d607976bbb8586512913a79f2bf7d7ec15bd8ea518"}, + {file = "zope.interface-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:0ec9653825f837fbddc4e4b603d90269b501486c11800d7c761eee7ce46d1bbb"}, + {file = "zope.interface-6.0-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:790c1d9d8f9c92819c31ea660cd43c3d5451df1df61e2e814a6f99cebb292788"}, + {file = "zope.interface-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b39b8711578dcfd45fc0140993403b8a81e879ec25d53189f3faa1f006087dca"}, + {file = "zope.interface-6.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eba51599370c87088d8882ab74f637de0c4f04a6d08a312dce49368ba9ed5c2a"}, + {file = "zope.interface-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ee934f023f875ec2cfd2b05a937bd817efcc6c4c3f55c5778cbf78e58362ddc"}, + {file = "zope.interface-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:042f2381118b093714081fd82c98e3b189b68db38ee7d35b63c327c470ef8373"}, + {file = "zope.interface-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:dfbbbf0809a3606046a41f8561c3eada9db811be94138f42d9135a5c47e75f6f"}, + {file = "zope.interface-6.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:424d23b97fa1542d7be882eae0c0fc3d6827784105264a8169a26ce16db260d8"}, + {file = "zope.interface-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e538f2d4a6ffb6edfb303ce70ae7e88629ac6e5581870e66c306d9ad7b564a58"}, + {file = "zope.interface-6.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12175ca6b4db7621aedd7c30aa7cfa0a2d65ea3a0105393e05482d7a2d367446"}, + {file = "zope.interface-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c3d7dfd897a588ec27e391edbe3dd320a03684457470415870254e714126b1f"}, + {file = "zope.interface-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:b3f543ae9d3408549a9900720f18c0194ac0fe810cecda2a584fd4dca2eb3bb8"}, + {file = "zope.interface-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d0583b75f2e70ec93f100931660328965bb9ff65ae54695fb3fa0a1255daa6f2"}, + {file = "zope.interface-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:23ac41d52fd15dd8be77e3257bc51bbb82469cf7f5e9a30b75e903e21439d16c"}, + {file = "zope.interface-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99856d6c98a326abbcc2363827e16bd6044f70f2ef42f453c0bd5440c4ce24e5"}, + {file = "zope.interface-6.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1592f68ae11e557b9ff2bc96ac8fc30b187e77c45a3c9cd876e3368c53dc5ba8"}, + {file = "zope.interface-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4407b1435572e3e1610797c9203ad2753666c62883b921318c5403fb7139dec2"}, + {file = "zope.interface-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:5171eb073474a5038321409a630904fd61f12dd1856dd7e9d19cd6fe092cbbc5"}, + {file = "zope.interface-6.0.tar.gz", hash = "sha256:aab584725afd10c710b8f1e6e208dbee2d0ad009f57d674cb9d1b3964037275d"}, ] [package.dependencies] @@ -3016,15 +3180,15 @@ test = ["coverage (>=5.0.3)", "zope.event", "zope.testing"] testing = ["coverage (>=5.0.3)", "zope.event", 
"zope.testing"] [[package]] -name = "zope.schema" -version = "6.2.0" +name = "zope-schema" +version = "7.0.1" description = "zope.interface extension for defining data schemas" category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=3.7" files = [ - {file = "zope.schema-6.2.0-py2.py3-none-any.whl", hash = "sha256:03150d8670549590b45109e06b7b964f4e751fa9cb5297ec4985c3bc38641b07"}, - {file = "zope.schema-6.2.0.tar.gz", hash = "sha256:2201aef8ad75ee5a881284d7a6acd384661d6dca7bde5e80a22839a77124595b"}, + {file = "zope.schema-7.0.1-py3-none-any.whl", hash = "sha256:cf006c678793b00e0075ad54d55281c8785ea21e5bc1f5ec0584787719c2aab2"}, + {file = "zope.schema-7.0.1.tar.gz", hash = "sha256:ead4dbcb03354d4e410c9a3b904451eb44d90254751b1cbdedf4a61aede9fbb9"}, ] [package.dependencies] @@ -3037,22 +3201,22 @@ docs = ["Sphinx", "repoze.sphinx.autointerface"] test = ["zope.i18nmessageid", "zope.testing", "zope.testrunner"] [extras] -all = ["matrix-synapse-ldap3", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "authlib", "lxml", "sentry-sdk", "jaeger-client", "opentracing", "txredisapi", "hiredis", "jump-consistent-hash", "Pympler", "pyicu"] +all = ["Pympler", "authlib", "hiredis", "jaeger-client", "jump-consistent-hash", "lxml", "matrix-synapse-ldap3", "opentracing", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pyicu", "pysaml2", "sentry-sdk", "txredisapi"] cache-memory = ["Pympler"] jwt = ["authlib"] matrix-synapse-ldap3 = ["matrix-synapse-ldap3"] oidc = ["authlib"] opentracing = ["jaeger-client", "opentracing"] postgres = ["psycopg2", "psycopg2cffi", "psycopg2cffi-compat"] -redis = ["txredisapi", "hiredis", "jump-consistent-hash"] +redis = ["hiredis", "jump-consistent-hash", "txredisapi"] saml2 = ["pysaml2"] sentry = ["sentry-sdk"] systemd = ["systemd-python"] -test = ["parameterized", "idna"] +test = ["idna", "parameterized"] url-preview = ["lxml"] user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.7.1" -content-hash = "9c59d02c5cd5fd48cb507b94091f6c12c407a830448e29a45fdfb77b14235f47" +content-hash = "8d770abbf23d8c3396a83b5a23200122fcb8a6365a300f468f3da4eb2bef00de" diff --git a/pyproject.toml b/pyproject.toml index 88884dce3d5e..9a68159f6aaa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.77.0" +version = "1.80.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" @@ -154,7 +154,9 @@ python = "^3.7.1" # we use the TYPE_CHECKER.redefine method added in jsonschema 3.0.0 jsonschema = ">=3.0.0" # frozendict 2.1.2 is broken on Debian 10: https://github.com/Marco-Sulla/python-frozendict/issues/41 -frozendict = ">=1,!=2.1.2" +# We cannot test our wheels against the 2.3.5 release in CI. Putting in an upper bound for this +# because frozendict has been more trouble than it's worth; we would like to move to immutabledict. +frozendict = ">=1,!=2.1.2,<2.3.5" # We require 2.1.0 or higher for type hints. Previous guard was >= 1.1.0 unpaddedbase64 = ">=2.1.0" # We require 1.5.0 to work around an issue when running against the C implementation of @@ -312,7 +314,7 @@ all = [ # We pin black so that our tests don't start failing on new releases. 
isort = ">=5.10.1" black = ">=22.3.0" -ruff = "0.0.230" +ruff = "0.0.252" # Typechecking mypy = "*" @@ -320,6 +322,7 @@ mypy-zope = "*" types-bleach = ">=4.1.0" types-commonmark = ">=0.9.2" types-jsonschema = ">=3.2.0" +types-netaddr = ">=0.8.0.6" types-opentracing = ">=2.4.2" types-Pillow = ">=8.3.4" types-psycopg2 = ">=2.9.9" @@ -347,6 +350,9 @@ twine = "*" # Towncrier min version comes from #3425. Rationale unclear. towncrier = ">=18.6.0rc1" +# Used for checking the Poetry lockfile +tomli = ">=1.2.3" + [build-system] # The upper bounds here are defensive, intended to prevent situations like # #13849 and #14079 where we see buildtime or runtime errors caused by build diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 09e2bba5e5c2..533a8cc6776f 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -24,7 +24,7 @@ anyhow = "1.0.63" lazy_static = "1.4.0" log = "0.4.17" pyo3 = { version = "0.17.1", features = ["macros", "anyhow", "abi3", "abi3-py37"] } -pyo3-log = "0.7.0" +pyo3-log = "0.8.1" pythonize = "0.17.0" regex = "1.6.0" serde = { version = "1.0.144", features = ["derive"] } diff --git a/rust/benches/evaluator.rs b/rust/benches/evaluator.rs index 35f7a50bcea0..64e13f6486e8 100644 --- a/rust/benches/evaluator.rs +++ b/rust/benches/evaluator.rs @@ -14,8 +14,10 @@ #![feature(test)] use std::collections::BTreeSet; + use synapse::push::{ - evaluator::PushRuleEvaluator, Condition, EventMatchCondition, FilteredPushRules, PushRules, + evaluator::PushRuleEvaluator, Condition, EventMatchCondition, FilteredPushRules, JsonValue, + PushRules, SimpleJsonValue, }; use test::Bencher; @@ -24,9 +26,18 @@ extern crate test; #[bench] fn bench_match_exact(b: &mut Bencher) { let flattened_keys = [ - ("type".to_string(), "m.text".to_string()), - ("room_id".to_string(), "!room:server".to_string()), - ("content.body".to_string(), "test message".to_string()), + ( + "type".to_string(), + JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())), + ), + ( + "room_id".to_string(), + JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())), + ), + ( + "content.body".to_string(), + JsonValue::Value(SimpleJsonValue::Str("test message".to_string())), + ), ] .into_iter() .collect(); @@ -34,8 +45,6 @@ fn bench_match_exact(b: &mut Bencher) { let eval = PushRuleEvaluator::py_new( flattened_keys, false, - BTreeSet::new(), - false, 10, Some(0), Default::default(), @@ -49,8 +58,7 @@ fn bench_match_exact(b: &mut Bencher) { let condition = Condition::Known(synapse::push::KnownCondition::EventMatch( EventMatchCondition { key: "room_id".into(), - pattern: Some("!room:server".into()), - pattern_type: None, + pattern: "!room:server".into(), }, )); @@ -63,9 +71,18 @@ fn bench_match_exact(b: &mut Bencher) { #[bench] fn bench_match_word(b: &mut Bencher) { let flattened_keys = [ - ("type".to_string(), "m.text".to_string()), - ("room_id".to_string(), "!room:server".to_string()), - ("content.body".to_string(), "test message".to_string()), + ( + "type".to_string(), + JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())), + ), + ( + "room_id".to_string(), + JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())), + ), + ( + "content.body".to_string(), + JsonValue::Value(SimpleJsonValue::Str("test message".to_string())), + ), ] .into_iter() .collect(); @@ -73,8 +90,6 @@ fn bench_match_word(b: &mut Bencher) { let eval = PushRuleEvaluator::py_new( flattened_keys, false, - BTreeSet::new(), - false, 10, Some(0), Default::default(), @@ -88,8 +103,7 @@ fn bench_match_word(b: &mut Bencher) { let condition = 
Condition::Known(synapse::push::KnownCondition::EventMatch( EventMatchCondition { key: "content.body".into(), - pattern: Some("test".into()), - pattern_type: None, + pattern: "test".into(), }, )); @@ -102,9 +116,18 @@ fn bench_match_word(b: &mut Bencher) { #[bench] fn bench_match_word_miss(b: &mut Bencher) { let flattened_keys = [ - ("type".to_string(), "m.text".to_string()), - ("room_id".to_string(), "!room:server".to_string()), - ("content.body".to_string(), "test message".to_string()), + ( + "type".to_string(), + JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())), + ), + ( + "room_id".to_string(), + JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())), + ), + ( + "content.body".to_string(), + JsonValue::Value(SimpleJsonValue::Str("test message".to_string())), + ), ] .into_iter() .collect(); @@ -112,8 +135,6 @@ fn bench_match_word_miss(b: &mut Bencher) { let eval = PushRuleEvaluator::py_new( flattened_keys, false, - BTreeSet::new(), - false, 10, Some(0), Default::default(), @@ -127,8 +148,7 @@ fn bench_match_word_miss(b: &mut Bencher) { let condition = Condition::Known(synapse::push::KnownCondition::EventMatch( EventMatchCondition { key: "content.body".into(), - pattern: Some("foobar".into()), - pattern_type: None, + pattern: "foobar".into(), }, )); @@ -141,9 +161,18 @@ fn bench_match_word_miss(b: &mut Bencher) { #[bench] fn bench_eval_message(b: &mut Bencher) { let flattened_keys = [ - ("type".to_string(), "m.text".to_string()), - ("room_id".to_string(), "!room:server".to_string()), - ("content.body".to_string(), "test message".to_string()), + ( + "type".to_string(), + JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())), + ), + ( + "room_id".to_string(), + JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())), + ), + ( + "content.body".to_string(), + JsonValue::Value(SimpleJsonValue::Str("test message".to_string())), + ), ] .into_iter() .collect(); @@ -151,8 +180,6 @@ fn bench_eval_message(b: &mut Bencher) { let eval = PushRuleEvaluator::py_new( flattened_keys, false, - BTreeSet::new(), - false, 10, Some(0), Default::default(), diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs index c2d15a4285c8..154f0f224d64 100644 --- a/rust/src/push/base_rules.rs +++ b/rust/src/push/base_rules.rs @@ -21,13 +21,13 @@ use lazy_static::lazy_static; use serde_json::Value; use super::KnownCondition; -use crate::push::Action; -use crate::push::Condition; -use crate::push::EventMatchCondition; -use crate::push::PushRule; -use crate::push::RelatedEventMatchCondition; +use crate::push::RelatedEventMatchTypeCondition; use crate::push::SetTweak; use crate::push::TweakValue; +use crate::push::{Action, EventPropertyIsCondition, SimpleJsonValue}; +use crate::push::{Condition, EventMatchTypeCondition}; +use crate::push::{EventMatchCondition, EventMatchPatternType}; +use crate::push::{EventPropertyIsTypeCondition, PushRule}; const HIGHLIGHT_ACTION: Action = Action::SetTweak(SetTweak { set_tweak: Cow::Borrowed("highlight"), @@ -72,23 +72,19 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("m.room.member")), - pattern_type: None, + pattern: Cow::Borrowed("m.room.member"), })), Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("content.membership"), - pattern: Some(Cow::Borrowed("invite")), - pattern_type: None, + pattern: Cow::Borrowed("invite"), })), - 
Condition::Known(KnownCondition::EventMatch(EventMatchCondition { + Condition::Known(KnownCondition::EventMatchType(EventMatchTypeCondition { key: Cow::Borrowed("state_key"), - pattern: None, - pattern_type: Some(Cow::Borrowed("user_id")), + pattern_type: Cow::Borrowed(&EventMatchPatternType::UserId), })), - Condition::Known(KnownCondition::EventMatch(EventMatchCondition { - key: Cow::Borrowed("content.fi.mau.will_auto_accept"), - pattern: Some(Cow::Borrowed("*")), - pattern_type: None, + Condition::Known(KnownCondition::EventPropertyIs(EventPropertyIsCondition { + key: Cow::Borrowed("content.fi\\.mau\\.will_auto_accept"), + value: Cow::Borrowed(&SimpleJsonValue::Bool(true)), })), ]), actions: Cow::Borrowed(&[Action::DontNotify]), @@ -103,9 +99,8 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ priority_class: 5, conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( EventMatchCondition { - key: Cow::Borrowed("content.m.relates_to.rel_type"), - pattern: Some(Cow::Borrowed("m.replace")), - pattern_type: None, + key: Cow::Borrowed("content.m\\.relates_to.rel_type"), + pattern: Cow::Borrowed("m.replace"), }, ))]), actions: Cow::Borrowed(&[]), @@ -118,8 +113,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("com.beeper.message_send_status")), - pattern_type: None, + pattern: Cow::Borrowed("com.beeper.message_send_status"), })), ]), actions: Cow::Borrowed(&[Action::DontNotify]), @@ -132,8 +126,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("cm.room.power_levels")), - pattern_type: None, + pattern: Cow::Borrowed("cm.room.power_levels"), })), ]), actions: Cow::Borrowed(&[Action::DontNotify]), @@ -146,18 +139,15 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("m.room.member")), - pattern_type: None, + pattern: Cow::Borrowed("m.room.member"), })), Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("content.membership"), - pattern: Some(Cow::Borrowed("invite")), - pattern_type: None, + pattern: Cow::Borrowed("invite"), })), - Condition::Known(KnownCondition::EventMatch(EventMatchCondition { + Condition::Known(KnownCondition::EventMatchType(EventMatchTypeCondition { key: Cow::Borrowed("state_key"), - pattern: None, - pattern_type: Some(Cow::Borrowed("user_id")), + pattern_type: Cow::Borrowed(&EventMatchPatternType::UserId), })), ]), actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION, SOUND_ACTION]), @@ -170,8 +160,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("m.room.member")), - pattern_type: None, + pattern: Cow::Borrowed("m.room.member"), }, ))]), actions: Cow::Borrowed(&[Action::DontNotify]), @@ -181,11 +170,10 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ PushRule { rule_id: Cow::Borrowed("global/override/.im.nheko.msc3664.reply"), priority_class: 5, - conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::RelatedEventMatch( - RelatedEventMatchCondition { - 
key: Some(Cow::Borrowed("sender")), - pattern: None, - pattern_type: Some(Cow::Borrowed("user_id")), + conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::RelatedEventMatchType( + RelatedEventMatchTypeCondition { + key: Cow::Borrowed("sender"), + pattern_type: Cow::Borrowed(&EventMatchPatternType::UserId), rel_type: Cow::Borrowed("m.in_reply_to"), include_fallbacks: None, }, @@ -197,7 +185,12 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ PushRule { rule_id: Cow::Borrowed(".org.matrix.msc3952.is_user_mention"), priority_class: 5, - conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::IsUserMention)]), + conditions: Cow::Borrowed(&[Condition::Known( + KnownCondition::ExactEventPropertyContainsType(EventPropertyIsTypeCondition { + key: Cow::Borrowed("content.org\\.matrix\\.msc3952\\.mentions.user_ids"), + value_type: Cow::Borrowed(&EventMatchPatternType::UserId), + }), + )]), actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]), default: true, default_enabled: true, @@ -214,7 +207,10 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ rule_id: Cow::Borrowed(".org.matrix.msc3952.is_room_mention"), priority_class: 5, conditions: Cow::Borrowed(&[ - Condition::Known(KnownCondition::IsRoomMention), + Condition::Known(KnownCondition::EventPropertyIs(EventPropertyIsCondition { + key: Cow::Borrowed("content.org\\.matrix\\.msc3952\\.mentions.room"), + value: Cow::Borrowed(&SimpleJsonValue::Bool(true)), + })), Condition::Known(KnownCondition::SenderNotificationPermission { key: Cow::Borrowed("room"), }), @@ -232,8 +228,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ }), Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("content.body"), - pattern: Some(Cow::Borrowed("@room")), - pattern_type: None, + pattern: Cow::Borrowed("@room"), })), ]), actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION]), @@ -246,13 +241,11 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("m.room.tombstone")), - pattern_type: None, + pattern: Cow::Borrowed("m.room.tombstone"), })), Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("state_key"), - pattern: Some(Cow::Borrowed("")), - pattern_type: None, + pattern: Cow::Borrowed(""), })), ]), actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION]), @@ -269,13 +262,11 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("m.reaction")), - pattern_type: None, + pattern: Cow::Borrowed("m.reaction"), })), - Condition::Known(KnownCondition::InverseRelatedEventMatch(RelatedEventMatchCondition { - key: Some(Cow::Borrowed("sender")), - pattern: None, - pattern_type: Some(Cow::Borrowed("user_id")), + Condition::Known(KnownCondition::InverseRelatedEventMatchType(RelatedEventMatchTypeCondition { + key: Cow::Borrowed("sender"), + pattern_type: Cow::Borrowed(&EventMatchPatternType::UserId), rel_type: Cow::Borrowed("m.annotation"), include_fallbacks: None, })), @@ -290,13 +281,11 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("m.room.server_acl")), - pattern_type: None, + 
pattern: Cow::Borrowed("m.room.server_acl"), })), Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("state_key"), - pattern: Some(Cow::Borrowed("")), - pattern_type: None, + pattern: Cow::Borrowed(""), })), ]), actions: Cow::Borrowed(&[]), @@ -309,8 +298,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.response")), - pattern_type: None, + pattern: Cow::Borrowed("org.matrix.msc3381.poll.response"), }, ))]), actions: Cow::Borrowed(&[]), @@ -322,11 +310,10 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ pub const BASE_APPEND_CONTENT_RULES: &[PushRule] = &[PushRule { rule_id: Cow::Borrowed("global/content/.m.rule.contains_user_name"), priority_class: 4, - conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( - EventMatchCondition { + conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatchType( + EventMatchTypeCondition { key: Cow::Borrowed("content.body"), - pattern: None, - pattern_type: Some(Cow::Borrowed("user_localpart")), + pattern_type: Cow::Borrowed(&EventMatchPatternType::UserLocalpart), }, ))]), actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]), @@ -344,8 +331,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( EventMatchCondition { key: Cow::Borrowed("content.msgtype"), - pattern: Some(Cow::Borrowed("m.notice")), - pattern_type: None, + pattern: Cow::Borrowed("m.notice"), }, ))]), actions: Cow::Borrowed(&[Action::DontNotify]), @@ -358,8 +344,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("m.call.invite")), - pattern_type: None, + pattern: Cow::Borrowed("m.call.invite"), }, ))]), actions: Cow::Borrowed(&[Action::Notify, RING_ACTION, HIGHLIGHT_FALSE_ACTION]), @@ -372,8 +357,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("m.room.message")), - pattern_type: None, + pattern: Cow::Borrowed("m.room.message"), })), Condition::Known(KnownCondition::RoomMemberCount { is: Some(Cow::Borrowed("2")), @@ -389,8 +373,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("m.room.encrypted")), - pattern_type: None, + pattern: Cow::Borrowed("m.room.encrypted"), })), Condition::Known(KnownCondition::RoomMemberCount { is: Some(Cow::Borrowed("2")), @@ -409,8 +392,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), // MSC3933: Type changed from template rule - see MSC. 
- pattern: Some(Cow::Borrowed("org.matrix.msc1767.encrypted")), - pattern_type: None, + pattern: Cow::Borrowed("org.matrix.msc1767.encrypted"), })), Condition::Known(KnownCondition::RoomMemberCount { is: Some(Cow::Borrowed("2")), @@ -434,8 +416,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), // MSC3933: Type changed from template rule - see MSC. - pattern: Some(Cow::Borrowed("org.matrix.msc1767.message")), - pattern_type: None, + pattern: Cow::Borrowed("org.matrix.msc1767.message"), })), Condition::Known(KnownCondition::RoomMemberCount { is: Some(Cow::Borrowed("2")), @@ -459,8 +440,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), // MSC3933: Type changed from template rule - see MSC. - pattern: Some(Cow::Borrowed("org.matrix.msc1767.file")), - pattern_type: None, + pattern: Cow::Borrowed("org.matrix.msc1767.file"), })), Condition::Known(KnownCondition::RoomMemberCount { is: Some(Cow::Borrowed("2")), @@ -484,8 +464,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), // MSC3933: Type changed from template rule - see MSC. - pattern: Some(Cow::Borrowed("org.matrix.msc1767.image")), - pattern_type: None, + pattern: Cow::Borrowed("org.matrix.msc1767.image"), })), Condition::Known(KnownCondition::RoomMemberCount { is: Some(Cow::Borrowed("2")), @@ -509,8 +488,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), // MSC3933: Type changed from template rule - see MSC. - pattern: Some(Cow::Borrowed("org.matrix.msc1767.video")), - pattern_type: None, + pattern: Cow::Borrowed("org.matrix.msc1767.video"), })), Condition::Known(KnownCondition::RoomMemberCount { is: Some(Cow::Borrowed("2")), @@ -534,8 +512,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), // MSC3933: Type changed from template rule - see MSC. - pattern: Some(Cow::Borrowed("org.matrix.msc1767.audio")), - pattern_type: None, + pattern: Cow::Borrowed("org.matrix.msc1767.audio"), })), Condition::Known(KnownCondition::RoomMemberCount { is: Some(Cow::Borrowed("2")), @@ -556,8 +533,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("m.room.message")), - pattern_type: None, + pattern: Cow::Borrowed("m.room.message"), }, ))]), actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]), @@ -570,8 +546,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("m.room.encrypted")), - pattern_type: None, + pattern: Cow::Borrowed("m.room.encrypted"), }, ))]), actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]), @@ -585,8 +560,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), // MSC3933: Type changed from template rule - see MSC. 
- pattern: Some(Cow::Borrowed("m.encrypted")), - pattern_type: None, + pattern: Cow::Borrowed("m.encrypted"), })), // MSC3933: Add condition on top of template rule - see MSC. Condition::Known(KnownCondition::RoomVersionSupports { @@ -605,8 +579,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), // MSC3933: Type changed from template rule - see MSC. - pattern: Some(Cow::Borrowed("m.message")), - pattern_type: None, + pattern: Cow::Borrowed("m.message"), })), // MSC3933: Add condition on top of template rule - see MSC. Condition::Known(KnownCondition::RoomVersionSupports { @@ -625,8 +598,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), // MSC3933: Type changed from template rule - see MSC. - pattern: Some(Cow::Borrowed("m.file")), - pattern_type: None, + pattern: Cow::Borrowed("m.file"), })), // MSC3933: Add condition on top of template rule - see MSC. Condition::Known(KnownCondition::RoomVersionSupports { @@ -645,8 +617,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), // MSC3933: Type changed from template rule - see MSC. - pattern: Some(Cow::Borrowed("m.image")), - pattern_type: None, + pattern: Cow::Borrowed("m.image"), })), // MSC3933: Add condition on top of template rule - see MSC. Condition::Known(KnownCondition::RoomVersionSupports { @@ -665,8 +636,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), // MSC3933: Type changed from template rule - see MSC. - pattern: Some(Cow::Borrowed("m.video")), - pattern_type: None, + pattern: Cow::Borrowed("m.video"), })), // MSC3933: Add condition on top of template rule - see MSC. Condition::Known(KnownCondition::RoomVersionSupports { @@ -685,8 +655,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), // MSC3933: Type changed from template rule - see MSC. - pattern: Some(Cow::Borrowed("m.audio")), - pattern_type: None, + pattern: Cow::Borrowed("m.audio"), })), // MSC3933: Add condition on top of template rule - see MSC. 
Condition::Known(KnownCondition::RoomVersionSupports { @@ -704,18 +673,15 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("im.vector.modular.widgets")), - pattern_type: None, + pattern: Cow::Borrowed("im.vector.modular.widgets"), })), Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("content.type"), - pattern: Some(Cow::Borrowed("jitsi")), - pattern_type: None, + pattern: Cow::Borrowed("jitsi"), })), Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("state_key"), - pattern: Some(Cow::Borrowed("*")), - pattern_type: None, + pattern: Cow::Borrowed("*"), })), ]), actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]), @@ -730,16 +696,14 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("m.reaction")), - pattern_type: None, + pattern: Cow::Borrowed("m.reaction"), })), Condition::Known(KnownCondition::RoomMemberCount { is: Some(Cow::Borrowed("<20")), }), - Condition::Known(KnownCondition::RelatedEventMatch(RelatedEventMatchCondition { - key: Some(Cow::Borrowed("sender")), - pattern: None, - pattern_type: Some(Cow::Borrowed("user_id")), + Condition::Known(KnownCondition::RelatedEventMatchType(RelatedEventMatchTypeCondition { + key: Cow::Borrowed("sender"), + pattern_type: Cow::Borrowed(&EventMatchPatternType::UserId), rel_type: Cow::Borrowed("m.annotation"), include_fallbacks: None, })), @@ -757,8 +721,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ }), Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.start")), - pattern_type: None, + pattern: Cow::Borrowed("org.matrix.msc3381.poll.start"), })), ]), actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION]), @@ -771,8 +734,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.start")), - pattern_type: None, + pattern: Cow::Borrowed("org.matrix.msc3381.poll.start"), }, ))]), actions: Cow::Borrowed(&[Action::Notify]), @@ -788,8 +750,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ }), Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.end")), - pattern_type: None, + pattern: Cow::Borrowed("org.matrix.msc3381.poll.end"), })), ]), actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION]), @@ -802,8 +763,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.end")), - pattern_type: None, + pattern: Cow::Borrowed("org.matrix.msc3381.poll.end"), }, ))]), actions: Cow::Borrowed(&[Action::Notify]), diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs index 78560e34a1f2..8b86c7ceac33 100644 --- a/rust/src/push/evaluator.rs +++ b/rust/src/push/evaluator.rs @@ -12,7 +12,8 @@ // See the License for the specific language governing permissions and // limitations under 
the License. -use std::collections::{BTreeMap, BTreeSet}; +use std::borrow::Cow; +use std::collections::BTreeMap; use anyhow::{Context, Error}; use lazy_static::lazy_static; @@ -22,9 +23,10 @@ use regex::Regex; use super::{ utils::{get_glob_matcher, get_localpart_from_id, GlobMatchType}, - Action, Condition, EventMatchCondition, FilteredPushRules, KnownCondition, - RelatedEventMatchCondition, + Action, Condition, EventPropertyIsCondition, FilteredPushRules, KnownCondition, + SimpleJsonValue, }; +use crate::push::{EventMatchPatternType, JsonValue}; lazy_static! { /// Used to parse the `is` clause in the room member count condition. @@ -61,19 +63,15 @@ impl RoomVersionFeatures { /// Allows running a set of push rules against a particular event. #[pyclass] pub struct PushRuleEvaluator { - /// A mapping of "flattened" keys to string values in the event, e.g. + /// A mapping of "flattened" keys to simple JSON values in the event, e.g. /// includes things like "type" and "content.msgtype". - flattened_keys: BTreeMap, + flattened_keys: BTreeMap, /// The "content.body", if any. body: String, /// True if the event has a mentions property and MSC3952 support is enabled. has_mentions: bool, - /// The user mentions that were part of the message. - user_mentions: BTreeSet, - /// True if the message is a room message. - room_mention: bool, /// The number of users in the room. room_member_count: u64, @@ -87,7 +85,7 @@ pub struct PushRuleEvaluator { /// The related events, indexed by relation type. Flattened in the same manner as /// `flattened_keys`. - related_events_flattened: BTreeMap>, + related_events_flattened: BTreeMap>, /// If msc3664, push rules for related events, is enabled. related_event_match_enabled: bool, @@ -106,29 +104,25 @@ impl PushRuleEvaluator { #[allow(clippy::too_many_arguments)] #[new] pub fn py_new( - flattened_keys: BTreeMap, + flattened_keys: BTreeMap, has_mentions: bool, - user_mentions: BTreeSet, - room_mention: bool, room_member_count: u64, sender_power_level: Option, notification_power_levels: BTreeMap, - related_events_flattened: BTreeMap>, + related_events_flattened: BTreeMap>, related_event_match_enabled: bool, room_version_feature_flags: Vec, msc3931_enabled: bool, ) -> Result { - let body = flattened_keys - .get("content.body") - .cloned() - .unwrap_or_default(); + let body = match flattened_keys.get("content.body") { + Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone(), + _ => String::new(), + }; Ok(PushRuleEvaluator { flattened_keys, body, has_mentions, - user_mentions, - room_mention, room_member_count, notification_power_levels, sender_power_level, @@ -249,23 +243,112 @@ impl PushRuleEvaluator { }; let result = match known_condition { - KnownCondition::EventMatch(event_match) => { - self.match_event_match(event_match, user_id)? + KnownCondition::EventMatch(event_match) => self.match_event_match( + &self.flattened_keys, + &event_match.key, + &event_match.pattern, + )?, + KnownCondition::EventMatchType(event_match) => { + // The `pattern_type` can either be "user_id" or "user_localpart", + // either way if we don't have a `user_id` then the condition can't + // match. + let user_id = if let Some(user_id) = user_id { + user_id + } else { + return Ok(false); + }; + + let pattern = match &*event_match.pattern_type { + EventMatchPatternType::UserId => user_id, + EventMatchPatternType::UserLocalpart => get_localpart_from_id(user_id)?, + }; + + self.match_event_match(&self.flattened_keys, &event_match.key, pattern)? 
} - KnownCondition::RelatedEventMatch(event_match) => { - self.match_related_event_match(event_match, user_id)? + KnownCondition::EventPropertyIs(event_property_is) => { + self.match_event_property_is(event_property_is)? } - KnownCondition::InverseRelatedEventMatch(event_match) => { - !self.match_related_event_match(event_match, user_id)? + KnownCondition::RelatedEventMatch(event_match) => self.match_related_event_match( + &event_match.rel_type.clone(), + event_match.include_fallbacks, + event_match.key.clone(), + event_match.pattern.clone(), + )?, + KnownCondition::RelatedEventMatchType(event_match) => { + // The `pattern_type` can either be "user_id" or "user_localpart", + // either way if we don't have a `user_id` then the condition can't + // match. + let user_id = if let Some(user_id) = user_id { + user_id + } else { + return Ok(false); + }; + + let pattern = match &*event_match.pattern_type { + EventMatchPatternType::UserId => user_id, + EventMatchPatternType::UserLocalpart => get_localpart_from_id(user_id)?, + }; + + self.match_related_event_match( + &event_match.rel_type.clone(), + event_match.include_fallbacks, + Some(event_match.key.clone()), + Some(Cow::Borrowed(pattern)), + )? } - KnownCondition::IsUserMention => { - if let Some(uid) = user_id { - self.user_mentions.contains(uid) + KnownCondition::InverseRelatedEventMatch(event_match) => !self.match_related_event_match( + &event_match.rel_type.clone(), + event_match.include_fallbacks, + event_match.key.clone(), + event_match.pattern.clone(), + )?, + KnownCondition::InverseRelatedEventMatchType(event_match) => { + // The `pattern_type` can either be "user_id" or "user_localpart", + // either way if we don't have a `user_id` then the condition can't + // match. + let user_id = if let Some(user_id) = user_id { + user_id } else { - false - } + return Ok(false); + }; + + let pattern = match &*event_match.pattern_type { + EventMatchPatternType::UserId => user_id, + EventMatchPatternType::UserLocalpart => get_localpart_from_id(user_id)?, + }; + + !self.match_related_event_match( + &event_match.rel_type.clone(), + event_match.include_fallbacks, + Some(event_match.key.clone()), + Some(Cow::Borrowed(pattern)), + )? + } + KnownCondition::EventPropertyContains(event_property_is) => self + .match_event_property_contains( + event_property_is.key.clone(), + event_property_is.value.clone(), + )?, + KnownCondition::ExactEventPropertyContainsType(exact_event_match) => { + // The `pattern_type` can either be "user_id" or "user_localpart", + // either way if we don't have a `user_id` then the condition can't + // match. + let user_id = if let Some(user_id) = user_id { + user_id + } else { + return Ok(false); + }; + + let pattern = match &*exact_event_match.value_type { + EventMatchPatternType::UserId => user_id, + EventMatchPatternType::UserLocalpart => get_localpart_from_id(user_id)?, + }; + + self.match_event_property_contains( + exact_event_match.key.clone(), + Cow::Borrowed(&SimpleJsonValue::Str(pattern.to_string())), + )? } - KnownCondition::IsRoomMention => self.room_mention, KnownCondition::ContainsDisplayName => { if let Some(dn) = display_name { if !dn.is_empty() { @@ -316,31 +399,13 @@ impl PushRuleEvaluator { /// Evaluates a `event_match` condition. 
fn match_event_match( &self, - event_match: &EventMatchCondition, - user_id: Option<&str>, + flattened_event: &BTreeMap, + key: &str, + pattern: &str, ) -> Result { - let pattern = if let Some(pattern) = &event_match.pattern { - pattern - } else if let Some(pattern_type) = &event_match.pattern_type { - // The `pattern_type` can either be "user_id" or "user_localpart", - // either way if we don't have a `user_id` then the condition can't - // match. - let user_id = if let Some(user_id) = user_id { - user_id - } else { - return Ok(false); - }; - - match &**pattern_type { - "user_id" => user_id, - "user_localpart" => get_localpart_from_id(user_id)?, - _ => return Ok(false), - } - } else { - return Ok(false); - }; - - let haystack = if let Some(haystack) = self.flattened_keys.get(&*event_match.key) { + let haystack = if let Some(JsonValue::Value(SimpleJsonValue::Str(haystack))) = + flattened_event.get(key) + { haystack } else { return Ok(false); @@ -348,7 +413,7 @@ impl PushRuleEvaluator { // For the content.body we match against "words", but for everything // else we match against the entire value. - let match_type = if event_match.key == "content.body" { + let match_type = if key == "content.body" { GlobMatchType::Word } else { GlobMatchType::Whole @@ -358,11 +423,31 @@ impl PushRuleEvaluator { compiled_pattern.is_match(haystack) } + /// Evaluates a `event_property_is` condition. + fn match_event_property_is( + &self, + event_property_is: &EventPropertyIsCondition, + ) -> Result { + let value = &event_property_is.value; + + let haystack = if let Some(JsonValue::Value(haystack)) = + self.flattened_keys.get(&*event_property_is.key) + { + haystack + } else { + return Ok(false); + }; + + Ok(haystack == &**value) + } + /// Evaluates a `related_event_match` condition. (MSC3664) fn match_related_event_match( &self, - event_match: &RelatedEventMatchCondition, - user_id: Option<&str>, + rel_type: &str, + include_fallbacks: Option, + key: Option>, + pattern: Option>, ) -> Result { // First check if related event matching is enabled... if !self.related_event_match_enabled { @@ -370,7 +455,7 @@ impl PushRuleEvaluator { } // get the related event, fail if there is none. - let event = if let Some(event) = self.related_events_flattened.get(&*event_match.rel_type) { + let event = if let Some(event) = self.related_events_flattened.get(rel_type) { event } else { return Ok(false); @@ -378,57 +463,33 @@ impl PushRuleEvaluator { // If we are not matching fallbacks, don't match if our special key indicating this is a // fallback relation is not present. - if !event_match.include_fallbacks.unwrap_or(false) - && event.contains_key("im.vector.is_falling_back") - { + if !include_fallbacks.unwrap_or(false) && event.contains_key("im.vector.is_falling_back") { return Ok(false); } - // if we have no key, accept the event as matching, if it existed without matching any - // fields. - let key = if let Some(key) = &event_match.key { - key - } else { - return Ok(true); - }; - - let pattern = if let Some(pattern) = &event_match.pattern { - pattern - } else if let Some(pattern_type) = &event_match.pattern_type { - // The `pattern_type` can either be "user_id" or "user_localpart", - // either way if we don't have a `user_id` then the condition can't - // match. 
- let user_id = if let Some(user_id) = user_id { - user_id - } else { - return Ok(false); - }; - - match &**pattern_type { - "user_id" => user_id, - "user_localpart" => get_localpart_from_id(user_id)?, - _ => return Ok(false), - } - } else { - return Ok(false); - }; + match (key, pattern) { + // if we have no key, accept the event as matching. + (None, _) => Ok(true), + // There was a key, so we *must* have a pattern to go with it. + (Some(_), None) => Ok(false), + // If there is a key & pattern, check if they're in the flattened event (given by rel_type). + (Some(key), Some(pattern)) => self.match_event_match(event, &key, &pattern), + } + } - let haystack = if let Some(haystack) = event.get(&**key) { + /// Evaluates a `event_property_contains` condition. + fn match_event_property_contains( + &self, + key: Cow, + value: Cow, + ) -> Result { + let haystack = if let Some(JsonValue::Array(haystack)) = self.flattened_keys.get(&*key) { haystack } else { return Ok(false); }; - // For the content.body we match against "words", but for everything - // else we match against the entire value. - let match_type = if key == "content.body" { - GlobMatchType::Word - } else { - GlobMatchType::Whole - }; - - let mut compiled_pattern = get_glob_matcher(pattern, match_type)?; - compiled_pattern.is_match(haystack) + Ok(haystack.contains(&value)) } /// Match the member count against an 'is' condition @@ -458,12 +519,13 @@ impl PushRuleEvaluator { #[test] fn push_rule_evaluator() { let mut flattened_keys = BTreeMap::new(); - flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string()); + flattened_keys.insert( + "content.body".to_string(), + JsonValue::Value(SimpleJsonValue::Str("foo bar bob hello".to_string())), + ); let evaluator = PushRuleEvaluator::py_new( flattened_keys, false, - BTreeSet::new(), - false, 10, Some(0), BTreeMap::new(), @@ -485,13 +547,14 @@ fn test_requires_room_version_supports_condition() { use crate::push::{PushRule, PushRules}; let mut flattened_keys = BTreeMap::new(); - flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string()); + flattened_keys.insert( + "content.body".to_string(), + JsonValue::Value(SimpleJsonValue::Str("foo bar bob hello".to_string())), + ); let flags = vec![RoomVersionFeatures::ExtensibleEvents.as_str().to_string()]; let evaluator = PushRuleEvaluator::py_new( flattened_keys, false, - BTreeSet::new(), - false, 10, Some(0), BTreeMap::new(), diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs index cea46e9afb1a..b93a20ab3970 100644 --- a/rust/src/push/mod.rs +++ b/rust/src/push/mod.rs @@ -56,7 +56,9 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use anyhow::{Context, Error}; use log::warn; +use pyo3::exceptions::PyTypeError; use pyo3::prelude::*; +use pyo3::types::{PyBool, PyList, PyLong, PyString}; use pythonize::{depythonize, pythonize}; use serde::de::Error as _; use serde::{Deserialize, Serialize}; @@ -248,6 +250,65 @@ impl<'de> Deserialize<'de> for Action { } } +/// A simple JSON values (string, int, boolean, or null). +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +#[serde(untagged)] +pub enum SimpleJsonValue { + Str(String), + Int(i64), + Bool(bool), + Null, +} + +impl<'source> FromPyObject<'source> for SimpleJsonValue { + fn extract(ob: &'source PyAny) -> PyResult { + if let Ok(s) = ::try_from(ob) { + Ok(SimpleJsonValue::Str(s.to_string())) + // A bool *is* an int, ensure we try bool first. 
+ } else if let Ok(b) = ::try_from(ob) { + Ok(SimpleJsonValue::Bool(b.extract()?)) + } else if let Ok(i) = ::try_from(ob) { + Ok(SimpleJsonValue::Int(i.extract()?)) + } else if ob.is_none() { + Ok(SimpleJsonValue::Null) + } else { + Err(PyTypeError::new_err(format!( + "Can't convert from {} to SimpleJsonValue", + ob.get_type().name()? + ))) + } + } +} + +/// A JSON values (list, string, int, boolean, or null). +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +#[serde(untagged)] +pub enum JsonValue { + Array(Vec), + Value(SimpleJsonValue), +} + +impl<'source> FromPyObject<'source> for JsonValue { + fn extract(ob: &'source PyAny) -> PyResult { + if let Ok(l) = ::try_from(ob) { + match l.iter().map(SimpleJsonValue::extract).collect() { + Ok(a) => Ok(JsonValue::Array(a)), + Err(e) => Err(PyTypeError::new_err(format!( + "Can't convert to JsonValue::Array: {}", + e + ))), + } + } else if let Ok(v) = SimpleJsonValue::extract(ob) { + Ok(JsonValue::Value(v)) + } else { + Err(PyTypeError::new_err(format!( + "Can't convert from {} to JsonValue", + ob.get_type().name()? + ))) + } + } +} + /// A condition used in push rules to match against an event. /// /// We need this split as `serde` doesn't give us the ability to have a @@ -267,14 +328,23 @@ pub enum Condition { #[serde(tag = "kind")] pub enum KnownCondition { EventMatch(EventMatchCondition), + // Identical to event_match but gives predefined patterns. Cannot be added by users. + #[serde(skip_deserializing, rename = "event_match")] + EventMatchType(EventMatchTypeCondition), + EventPropertyIs(EventPropertyIsCondition), #[serde(rename = "im.nheko.msc3664.related_event_match")] RelatedEventMatch(RelatedEventMatchCondition), - #[serde(rename = "im.nheko.msc3664.inverse_related_event_match")] + #[serde(rename = "im.nheko.msc3664.related_event_match")] InverseRelatedEventMatch(RelatedEventMatchCondition), - #[serde(rename = "org.matrix.msc3952.is_user_mention")] - IsUserMention, - #[serde(rename = "org.matrix.msc3952.is_room_mention")] - IsRoomMention, + // Identical to related_event_match but gives predefined patterns. Cannot be added by users. + #[serde(skip_deserializing, rename = "im.nheko.msc3664.related_event_match")] + RelatedEventMatchType(RelatedEventMatchTypeCondition), + #[serde(skip_deserializing, rename = "im.nheko.msc3664.related_event_match")] + InverseRelatedEventMatchType(RelatedEventMatchTypeCondition), + EventPropertyContains(EventPropertyIsCondition), + // Identical to exact_event_property_contains but gives predefined patterns. Cannot be added by users. + #[serde(skip_deserializing, rename = "event_property_contains")] + ExactEventPropertyContainsType(EventPropertyIsTypeCondition), ContainsDisplayName, RoomMemberCount { #[serde(skip_serializing_if = "Option::is_none")] @@ -301,14 +371,43 @@ impl<'source> FromPyObject<'source> for Condition { } } -/// The body of a [`Condition::EventMatch`] +/// The body of a [`Condition::EventMatch`] with a pattern. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct EventMatchCondition { pub key: Cow<'static, str>, - #[serde(skip_serializing_if = "Option::is_none")] - pub pattern: Option>, - #[serde(skip_serializing_if = "Option::is_none")] - pub pattern_type: Option>, + pub pattern: Cow<'static, str>, +} + +#[derive(Serialize, Debug, Clone)] +#[serde(rename_all = "snake_case")] +pub enum EventMatchPatternType { + UserId, + UserLocalpart, +} + +/// The body of a [`Condition::EventMatch`] that uses user_id or user_localpart as a pattern. 
+#[derive(Serialize, Debug, Clone)] +pub struct EventMatchTypeCondition { + pub key: Cow<'static, str>, + // During serialization, the pattern_type property gets replaced with a + // pattern property of the correct value in synapse.push.clientformat.format_push_rules_for_user. + pub pattern_type: Cow<'static, EventMatchPatternType>, +} + +/// The body of a [`Condition::EventPropertyIs`] +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct EventPropertyIsCondition { + pub key: Cow<'static, str>, + pub value: Cow<'static, SimpleJsonValue>, +} + +/// The body of a [`Condition::EventPropertyIs`] that uses user_id or user_localpart as a pattern. +#[derive(Serialize, Debug, Clone)] +pub struct EventPropertyIsTypeCondition { + pub key: Cow<'static, str>, + // During serialization, the pattern_type property gets replaced with a + // pattern property of the correct value in synapse.push.clientformat.format_push_rules_for_user. + pub value_type: Cow<'static, EventMatchPatternType>, } /// The body of a [`Condition::RelatedEventMatch`] @@ -318,8 +417,18 @@ pub struct RelatedEventMatchCondition { pub key: Option>, #[serde(skip_serializing_if = "Option::is_none")] pub pattern: Option>, + pub rel_type: Cow<'static, str>, #[serde(skip_serializing_if = "Option::is_none")] - pub pattern_type: Option>, + pub include_fallbacks: Option, +} + +/// The body of a [`Condition::RelatedEventMatch`] that uses user_id or user_localpart as a pattern. +#[derive(Serialize, Debug, Clone)] +pub struct RelatedEventMatchTypeCondition { + // This is only used if pattern_type exists (and thus key must exist), so is + // a bit simpler than RelatedEventMatchCondition. + pub key: Cow<'static, str>, + pub pattern_type: Cow<'static, EventMatchPatternType>, pub rel_type: Cow<'static, str>, #[serde(skip_serializing_if = "Option::is_none")] pub include_fallbacks: Option, @@ -503,8 +612,7 @@ impl FilteredPushRules { fn test_serialize_condition() { let condition = Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: "content.body".into(), - pattern: Some("coffee".into()), - pattern_type: None, + pattern: "coffee".into(), })); let json = serde_json::to_string(&condition).unwrap(); @@ -518,7 +626,33 @@ fn test_serialize_condition() { fn test_deserialize_condition() { let json = r#"{"kind":"event_match","key":"content.body","pattern":"coffee"}"#; - let _: Condition = serde_json::from_str(json).unwrap(); + let condition: Condition = serde_json::from_str(json).unwrap(); + assert!(matches!( + condition, + Condition::Known(KnownCondition::EventMatch(_)) + )); +} + +#[test] +fn test_serialize_event_match_condition_with_pattern_type() { + let condition = Condition::Known(KnownCondition::EventMatchType(EventMatchTypeCondition { + key: "content.body".into(), + pattern_type: Cow::Owned(EventMatchPatternType::UserId), + })); + + let json = serde_json::to_string(&condition).unwrap(); + assert_eq!( + json, + r#"{"kind":"event_match","key":"content.body","pattern_type":"user_id"}"# + ) +} + +#[test] +fn test_cannot_deserialize_event_match_condition_with_pattern_type() { + let json = r#"{"kind":"event_match","key":"content.body","pattern_type":"user_id"}"#; + + let condition: Condition = serde_json::from_str(json).unwrap(); + assert!(matches!(condition, Condition::Unknown(_))); } #[test] @@ -532,6 +666,37 @@ fn test_deserialize_unstable_msc3664_condition() { )); } +#[test] +fn test_serialize_unstable_msc3664_condition_with_pattern_type() { + let condition = Condition::Known(KnownCondition::RelatedEventMatchType( + 
RelatedEventMatchTypeCondition { + key: "content.body".into(), + pattern_type: Cow::Owned(EventMatchPatternType::UserId), + rel_type: "m.in_reply_to".into(), + include_fallbacks: Some(true), + }, + )); + + let json = serde_json::to_string(&condition).unwrap(); + assert_eq!( + json, + r#"{"kind":"im.nheko.msc3664.related_event_match","key":"content.body","pattern_type":"user_id","rel_type":"m.in_reply_to","include_fallbacks":true}"# + ) +} + +#[test] +fn test_cannot_deserialize_unstable_msc3664_condition_with_pattern_type() { + let json = r#"{"kind":"im.nheko.msc3664.related_event_match","key":"content.body","pattern_type":"user_id","rel_type":"m.in_reply_to"}"#; + + let condition: Condition = serde_json::from_str(json).unwrap(); + // Since pattern is optional on RelatedEventMatch it deserializes it to that + // instead of RelatedEventMatchType. + assert!(matches!( + condition, + Condition::Known(KnownCondition::RelatedEventMatch(_)) + )); +} + #[test] fn test_deserialize_unstable_msc3931_condition() { let json = @@ -545,24 +710,41 @@ fn test_deserialize_unstable_msc3931_condition() { } #[test] -fn test_deserialize_unstable_msc3952_user_condition() { - let json = r#"{"kind":"org.matrix.msc3952.is_user_mention"}"#; +fn test_deserialize_event_property_is_condition() { + // A string condition should work. + let json = r#"{"kind":"event_property_is","key":"content.value","value":"foo"}"#; let condition: Condition = serde_json::from_str(json).unwrap(); assert!(matches!( condition, - Condition::Known(KnownCondition::IsUserMention) + Condition::Known(KnownCondition::EventPropertyIs(_)) )); -} -#[test] -fn test_deserialize_unstable_msc3952_room_condition() { - let json = r#"{"kind":"org.matrix.msc3952.is_room_mention"}"#; + // A boolean condition should work. + let json = r#"{"kind":"event_property_is","key":"content.value","value":true}"#; + + let condition: Condition = serde_json::from_str(json).unwrap(); + assert!(matches!( + condition, + Condition::Known(KnownCondition::EventPropertyIs(_)) + )); + + // An integer condition should work. + let json = r#"{"kind":"event_property_is","key":"content.value","value":1}"#; + + let condition: Condition = serde_json::from_str(json).unwrap(); + assert!(matches!( + condition, + Condition::Known(KnownCondition::EventPropertyIs(_)) + )); + + // A null condition should work + let json = r#"{"kind":"event_property_is","key":"content.value","value":null}"#; let condition: Condition = serde_json::from_str(json).unwrap(); assert!(matches!( condition, - Condition::Known(KnownCondition::IsRoomMention) + Condition::Known(KnownCondition::EventPropertyIs(_)) )); } diff --git a/scripts-dev/check_locked_deps_have_sdists.py b/scripts-dev/check_locked_deps_have_sdists.py new file mode 100755 index 000000000000..63ad99280a0d --- /dev/null +++ b/scripts-dev/check_locked_deps_have_sdists.py @@ -0,0 +1,58 @@ +#! /usr/bin/env python +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import sys +from pathlib import Path +from typing import Dict, List + +import tomli + + +def main() -> None: + lockfile_path = Path(__file__).parent.parent.joinpath("poetry.lock") + with open(lockfile_path, "rb") as lockfile: + lockfile_content = tomli.load(lockfile) + + # Poetry 1.3+ lockfile format: + # There's a `files` inline table in each [[package]] + packages_to_assets: Dict[str, List[Dict[str, str]]] = { + package["name"]: package["files"] for package in lockfile_content["package"] + } + + success = True + + for package_name, assets in packages_to_assets.items(): + has_sdist = any(asset["file"].endswith(".tar.gz") for asset in assets) + if not has_sdist: + success = False + print( + f"Locked package {package_name!r} does not have a source distribution!", + file=sys.stderr, + ) + + if not success: + print( + "\nThere were some problems with the Poetry lockfile (poetry.lock).", + file=sys.stderr, + ) + sys.exit(1) + + print( + f"Poetry lockfile OK. {len(packages_to_assets)} locked packages checked.", + file=sys.stderr, + ) + + +if __name__ == "__main__": + main() diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 66aaa3d8480d..1b1761202fdd 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -59,6 +59,11 @@ Run the complement test suite on Synapse. is important. Not suitable for use in CI in case the editable environment is impure. + --rebuild-editable + Force a rebuild of the editable build of Synapse. + This is occasionally useful if the built-in rebuild detection with + --editable fails, e.g. when changing configure_workers_and_start.py. + For help on arguments to 'go test', run 'go help testflag'. EOF } @@ -82,6 +87,9 @@ while [ $# -ge 1 ]; do "-e"|"--editable") use_editable_synapse=1 ;; + "--rebuild-editable") + rebuild_editable_synapse=1 + ;; *) # unknown arg: presumably an argument to gotest. break the loop. break @@ -116,7 +124,9 @@ if [ -n "$use_editable_synapse" ]; then fi editable_mount="$(realpath .):/editable-src:z" - if docker inspect complement-synapse-editable &>/dev/null; then + if [ -n "$rebuild_editable_synapse" ]; then + unset skip_docker_build + elif docker inspect complement-synapse-editable &>/dev/null; then # complement-synapse-editable already exists: see if we can still use it: # - The Rust module must still be importable; it will fail to import if the Rust source has changed. # - The Poetry lock file must be the same (otherwise we assume dependencies have changed) diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh index 392c509a8ac1..9e4ed3246e7a 100755 --- a/scripts-dev/lint.sh +++ b/scripts-dev/lint.sh @@ -112,7 +112,7 @@ python3 -m black "${files[@]}" # Catch any common programming mistakes in Python code. # --quiet suppresses the update check. -ruff --quiet "${files[@]}" +ruff --quiet --fix "${files[@]}" # Catch any common programming mistakes in Rust code. # diff --git a/scripts-dev/make_full_schema.sh b/scripts-dev/make_full_schema.sh index e2bc1640bb2a..473f54772a8a 100755 --- a/scripts-dev/make_full_schema.sh +++ b/scripts-dev/make_full_schema.sh @@ -19,7 +19,8 @@ usage() { echo "-c" echo " CI mode. Prints every command that the script runs." echo "-o " - echo " Directory to output full schema files to." + echo " Directory to output full schema files to. You probably want to use" + echo " '-o synapse/storage/schema'" echo "-n " echo " Schema number for the new snapshot. Used to set the location of files within " echo " the output directory, mimicking that of synapse/storage/schemas." 
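[Editor's note, between hunks: a minimal sketch of the kind of poetry.lock data that the new scripts-dev/check_locked_deps_have_sdists.py script added above operates on. The package names and hashes below are made up for illustration; only the shape of the `files` tables and the `.tar.gz` test mirror the script's actual logic.]

# Illustrative only: a tiny, hypothetical excerpt of a Poetry 1.3+ lockfile.
# Real entries carry full filenames and sha256 hashes; these are stand-ins.
import tomli

FAKE_LOCKFILE = """
[[package]]
name = "good-package"
version = "1.0.0"
files = [
    {file = "good_package-1.0.0-py3-none-any.whl", hash = "sha256:aaaa"},
    {file = "good_package-1.0.0.tar.gz", hash = "sha256:bbbb"},
]

[[package]]
name = "wheel-only-package"
version = "2.0.0"
files = [
    {file = "wheel_only_package-2.0.0-py3-none-any.whl", hash = "sha256:cccc"},
]
"""

lock = tomli.loads(FAKE_LOCKFILE)
for package in lock["package"]:
    # Same test as the new CI check: an sdist is any locked asset ending in .tar.gz.
    has_sdist = any(asset["file"].endswith(".tar.gz") for asset in package["files"])
    print(package["name"], "OK" if has_sdist else "MISSING SDIST")

[End of editor's note; the make_full_schema.sh diff continues below.]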
@@ -27,6 +28,11 @@ usage() { echo "-h" echo " Display this help text." echo "" + echo "" + echo "You probably want to invoke this with something like" + echo " docker run --rm -e POSTGRES_PASSWORD=postgres -e POSTGRES_USER=postgres -e POSTGRES_DB=synapse -p 5432:5432 postgres:11-alpine" + echo " echo postgres | scripts-dev/make_full_schema.sh -p postgres -n MY_SCHEMA_NUMBER -o synapse/storage/schema" + echo "" echo " NB: make sure to run this against the *oldest* supported version of postgres," echo " or else pg_dump might output non-backwards-compatible syntax." } @@ -189,7 +195,7 @@ python -m synapse.app.homeserver --generate-keys -c "$SQLITE_CONFIG" # Make sure the SQLite3 database is using the latest schema and has no pending background update. echo "Running db background jobs..." -synapse/_scripts/update_synapse_database.py --database-config "$SQLITE_CONFIG" --run-background-updates +poetry run python synapse/_scripts/update_synapse_database.py --database-config "$SQLITE_CONFIG" --run-background-updates # Create the PostgreSQL database. echo "Creating postgres databases..." @@ -198,7 +204,7 @@ createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_MAIN_DB_NAM createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_STATE_DB_NAME" echo "Running db background jobs..." -synapse/_scripts/update_synapse_database.py --database-config "$POSTGRES_CONFIG" --run-background-updates +poetry run python synapse/_scripts/update_synapse_database.py --database-config "$POSTGRES_CONFIG" --run-background-updates echo "Dropping unwanted db tables..." @@ -293,4 +299,12 @@ pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owne pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_STATE_DB_NAME" | cleanup_pg_schema > "$OUTPUT_DIR/state/full_schemas/$SCHEMA_NUMBER/full.sql.postgres" pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_STATE_DB_NAME" | cleanup_pg_schema >> "$OUTPUT_DIR/state/full_schemas/$SCHEMA_NUMBER/full.sql.postgres" +if [[ "$OUTPUT_DIR" == *synapse/storage/schema ]]; then + echo "Updating contrib/datagrip symlinks..." + ln -sf "../../synapse/storage/schema/common/full_schemas/$SCHEMA_NUMBER/full.sql.postgres" "contrib/datagrip/common.sql" + ln -sf "../../synapse/storage/schema/main/full_schemas/$SCHEMA_NUMBER/full.sql.postgres" "contrib/datagrip/main.sql" + ln -sf "../../synapse/storage/schema/state/full_schemas/$SCHEMA_NUMBER/full.sql.postgres" "contrib/datagrip/state.sql" +else + echo "Not updating contrib/datagrip symlinks (unknown output directory)" +fi echo "Done! Files dumped to: $OUTPUT_DIR" diff --git a/stubs/sortedcontainers/sortedlist.pyi b/stubs/sortedcontainers/sortedlist.pyi index 1fe1a136f190..0e745c0a79ac 100644 --- a/stubs/sortedcontainers/sortedlist.pyi +++ b/stubs/sortedcontainers/sortedlist.pyi @@ -29,7 +29,6 @@ _Repr = Callable[[], str] def recursive_repr(fillvalue: str = ...) -> Callable[[_Repr], _Repr]: ... class SortedList(MutableSequence[_T]): - DEFAULT_LOAD_FACTOR: int = ... def __init__( self, diff --git a/stubs/synapse/synapse_rust/push.pyi b/stubs/synapse/synapse_rust/push.pyi index 754acab2f978..5d0ce4b1a4bd 100644 --- a/stubs/synapse/synapse_rust/push.pyi +++ b/stubs/synapse/synapse_rust/push.pyi @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Set, Tuple, Union +from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Tuple, Union -from synapse.types import JsonDict +from synapse.types import JsonDict, JsonValue class PushRule: @property @@ -56,14 +56,12 @@ def get_base_rule_ids() -> Collection[str]: ... class PushRuleEvaluator: def __init__( self, - flattened_keys: Mapping[str, str], + flattened_keys: Mapping[str, JsonValue], has_mentions: bool, - user_mentions: Set[str], - room_mention: bool, room_member_count: int, sender_power_level: Optional[int], notification_power_levels: Mapping[str, int], - related_events_flattened: Mapping[str, Mapping[str, str]], + related_events_flattened: Mapping[str, Mapping[str, JsonValue]], related_event_match_enabled: bool, room_version_feature_flags: Tuple[str, ...], msc3931_enabled: bool, diff --git a/synapse/__init__.py b/synapse/__init__.py index fbfd506a43d0..a203ed533ae5 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -1,5 +1,6 @@ # Copyright 2014-2016 OpenMarket Ltd -# Copyright 2018-9 New Vector Ltd +# Copyright 2018-2019 New Vector Ltd +# Copyright 2023 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -""" This is a reference implementation of a Matrix homeserver. +""" This is an implementation of a Matrix homeserver. """ import json diff --git a/synapse/_scripts/move_remote_media_to_new_store.py b/synapse/_scripts/move_remote_media_to_new_store.py index 819afaaca6db..0dd36bee20ad 100755 --- a/synapse/_scripts/move_remote_media_to_new_store.py +++ b/synapse/_scripts/move_remote_media_to_new_store.py @@ -37,7 +37,7 @@ import shutil import sys -from synapse.rest.media.v1.filepath import MediaFilePaths +from synapse.media.filepath import MediaFilePaths logger = logging.getLogger() diff --git a/synapse/_scripts/register_new_matrix_user.py b/synapse/_scripts/register_new_matrix_user.py index 2b74a4016687..19ca399d446a 100644 --- a/synapse/_scripts/register_new_matrix_user.py +++ b/synapse/_scripts/register_new_matrix_user.py @@ -47,7 +47,6 @@ def request_registration( _print: Callable[[str], None] = print, exit: Callable[[int], None] = sys.exit, ) -> None: - url = "%s/_synapse/admin/v1/register" % (server_location.rstrip("/"),) # Get the nonce @@ -154,7 +153,6 @@ def register_new_user( def main() -> None: - logging.captureWarnings(True) parser = argparse.ArgumentParser( diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index 5e137dbbf711..78d76d38ad90 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -94,61 +94,80 @@ logger = logging.getLogger("synapse_port_db") +# SQLite doesn't have a dedicated boolean type (it stores True/False as 1/0). This means +# portdb will read sqlite bools as integers, then try to insert them into postgres +# boolean columns---which fails. Lacking some Python-parseable metaschema, we must +# specify which integer columns should be inserted as booleans into postgres. 
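# A minimal sketch (hypothetical helper, not the script's actual code) of the
# conversion the mapping below drives: values read from SQLite arrive as 0/1
# integers, and any column listed in BOOLEAN_COLUMNS must become a real bool
# before the row can be inserted into a Postgres boolean column.
from typing import Any, Dict, List

BOOLEAN_COLUMNS_EXCERPT: Dict[str, List[str]] = {"pushers": ["enabled"]}

def coerce_booleans(table: str, row: Dict[str, Any]) -> Dict[str, Any]:
    bool_cols = set(BOOLEAN_COLUMNS_EXCERPT.get(table, []))
    return {
        col: bool(val) if col in bool_cols and val is not None else val
        for col, val in row.items()
    }

# coerce_booleans("pushers", {"id": 7, "enabled": 1}) -> {"id": 7, "enabled": True}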
BOOLEAN_COLUMNS = { - "events": ["processed", "outlier", "contains_url"], - "rooms": ["is_public", "has_auth_chain_index"], + "access_tokens": ["used"], + "account_validity": ["email_sent"], + "device_lists_changes_in_room": ["converted_to_destinations"], + "device_lists_outbound_pokes": ["sent"], + "devices": ["hidden"], + "e2e_fallback_keys_json": ["used"], + "e2e_room_keys": ["is_verified"], "event_edges": ["is_state"], + "events": ["processed", "outlier", "contains_url"], + "local_media_repository": ["safe_from_quarantine"], "presence_list": ["accepted"], "presence_stream": ["currently_active"], "public_room_list_stream": ["visibility"], - "devices": ["hidden"], - "device_lists_outbound_pokes": ["sent"], - "users_who_share_rooms": ["share_private"], - "e2e_room_keys": ["is_verified"], - "account_validity": ["email_sent"], + "pushers": ["enabled"], "redactions": ["have_censored"], "room_stats_state": ["is_federatable"], - "local_media_repository": ["safe_from_quarantine"], + "rooms": ["is_public", "has_auth_chain_index"], "users": ["shadow_banned", "approved"], - "e2e_fallback_keys_json": ["used"], - "access_tokens": ["used"], - "device_lists_changes_in_room": ["converted_to_destinations"], - "pushers": ["enabled"], + "un_partial_stated_event_stream": ["rejection_status_changed"], + "users_who_share_rooms": ["share_private"], } +# These tables are never deleted from in normal operation [*], so we can resume porting +# over rows from a previous attempt rather than starting from scratch. +# +# [*]: We do delete from many of these tables when purging a room, and +# presumably when purging old events. So we might e.g. +# +# 1. Run portdb and port half of some table. +# 2. Stop portdb. +# 3. Purge something, deleting some of the rows we've ported over. +# 4. Restart portdb. The rows deleted from sqlite are still present in postgres. +# +# But this isn't the end of the world: we should be able to repeat the purge +# on the postgres DB when porting completes. 
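# Rough illustration (assumed bookkeeping, not what the script actually stores) of
# why an append-only table is safe to resume: since rows are never deleted during
# normal operation, knowing how many rows a previous run already copied into
# Postgres tells us exactly where to pick up again.
from typing import Sequence, Tuple

Row = Tuple  # shape depends on the table

def rows_still_to_port(sqlite_rows: Sequence[Row], already_ported: int) -> Sequence[Row]:
    # Everything before `already_ported` was copied by an earlier run.
    return sqlite_rows[already_ported:]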
APPEND_ONLY_TABLES = [ + "cache_invalidation_stream_by_instance", + "event_auth", + "event_edges", + "event_json", "event_reference_hashes", + "event_search", + "event_to_state_groups", "events", - "event_json", - "state_events", - "room_memberships", - "topics", - "room_names", - "rooms", + "ex_outlier_stream", "local_media_repository", "local_media_repository_thumbnails", + "presence_stream", + "public_room_list_stream", + "push_rules_stream", + "received_transactions", + "redactions", + "rejections", "remote_media_cache", "remote_media_cache_thumbnails", - "redactions", - "event_edges", - "event_auth", - "received_transactions", + "room_memberships", + "room_names", + "rooms", "sent_transactions", - "transaction_id_to_pdu", - "users", + "state_events", + "state_group_edges", "state_groups", "state_groups_state", - "event_to_state_groups", - "rejections", - "event_search", - "presence_stream", - "push_rules_stream", - "ex_outlier_stream", - "cache_invalidation_stream_by_instance", - "public_room_list_stream", - "state_group_edges", "stream_ordering_to_exterm", + "topics", + "transaction_id_to_pdu", + "un_partial_stated_event_stream", + "users", ] @@ -1186,7 +1205,6 @@ def render(self, force: bool = False) -> None: if self.finished: status = "Time spent: %s (Done!)" % (duration_str,) else: - if self.total_processed > 0: left = float(self.total_remaining) / self.total_processed @@ -1311,7 +1329,7 @@ def main() -> None: sqlite_config = { "name": "sqlite3", "args": { - "database": "file:{}?mode=rw".format(args.sqlite_database), + "database": args.sqlite_database, "cp_min": 1, "cp_max": 1, "check_same_thread": False, diff --git a/synapse/_scripts/synctl.py b/synapse/_scripts/synctl.py index b4c96ad7f3e2..077b90935e27 100755 --- a/synapse/_scripts/synctl.py +++ b/synapse/_scripts/synctl.py @@ -167,7 +167,6 @@ def stop(pidfile: str, app: str) -> Optional[int]: def main() -> None: - parser = argparse.ArgumentParser() parser.add_argument( diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 35a3e2ac27e0..4f6b4dbb3b53 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -32,7 +32,6 @@ from synapse.http import get_request_user_agent from synapse.http.site import SynapseRequest from synapse.logging.opentracing import ( - SynapseTags, active_span, force_tracing, start_active_span, @@ -162,12 +161,6 @@ async def get_user_by_req( parent_span.set_tag( "authenticated_entity", requester.authenticated_entity ) - # We tag the Synapse instance name so that it's an easy jumping - # off point into the logs. Can also be used to filter for an - # instance that is under load. 
- parent_span.set_tag( - SynapseTags.INSTANCE_NAME, self.hs.get_instance_name() - ) parent_span.set_tag("user_id", requester.user.to_string()) if requester.device_id is not None: parent_span.set_tag("device_id", requester.device_id) diff --git a/synapse/api/errors.py b/synapse/api/errors.py index c2c177fd71d0..8c6822f3c6ea 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -108,6 +108,15 @@ class Codes(str, Enum): USER_AWAITING_APPROVAL = "ORG.MATRIX.MSC3866_USER_AWAITING_APPROVAL" + AS_PING_URL_NOT_SET = "FI.MAU.MSC2659_URL_NOT_SET" + AS_PING_BAD_STATUS = "FI.MAU.MSC2659_BAD_STATUS" + AS_PING_CONNECTION_TIMEOUT = "FI.MAU.MSC2659_CONNECTION_TIMEOUT" + AS_PING_CONNECTION_FAILED = "FI.MAU.MSC2659_CONNECTION_FAILED" + + # Attempt to send a second annotation with the same event type & annotation key + # MSC2677 + DUPLICATE_ANNOTATION = "M_DUPLICATE_ANNOTATION" + class CodeMessageException(RuntimeError): """An exception with integer code and message string attributes. @@ -751,3 +760,25 @@ class ModuleFailedException(Exception): Raised when a module API callback fails, for example because it raised an exception. """ + + +class PartialStateConflictError(SynapseError): + """An internal error raised when attempting to persist an event with partial state + after the room containing the event has been un-partial stated. + + This error should be handled by recomputing the event context and trying again. + + This error has an HTTP status code so that it can be transported over replication. + It should not be exposed to clients. + """ + + @staticmethod + def message() -> str: + return "Cannot persist partial state event in un-partial stated room" + + def __init__(self) -> None: + super().__init__( + HTTPStatus.CONFLICT, + msg=PartialStateConflictError.message(), + errcode=Codes.UNKNOWN, + ) diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index 83c42fc25a22..b9f432cc234a 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -219,9 +219,13 @@ def __init__(self, hs: "HomeServer", filter_json: JsonDict): self._room_timeline_filter = Filter(hs, room_filter_json.get("timeline", {})) self._room_state_filter = Filter(hs, room_filter_json.get("state", {})) self._room_ephemeral_filter = Filter(hs, room_filter_json.get("ephemeral", {})) - self._room_account_data = Filter(hs, room_filter_json.get("account_data", {})) + self._room_account_data_filter = Filter( + hs, room_filter_json.get("account_data", {}) + ) self._presence_filter = Filter(hs, filter_json.get("presence", {})) - self._account_data = Filter(hs, filter_json.get("account_data", {})) + self._global_account_data_filter = Filter( + hs, filter_json.get("account_data", {}) + ) self.include_leave = filter_json.get("room", {}).get("include_leave", False) self.event_fields = filter_json.get("event_fields", []) @@ -256,8 +260,10 @@ async def filter_presence( ) -> List[UserPresenceState]: return await self._presence_filter.filter(presence_states) - async def filter_account_data(self, events: Iterable[JsonDict]) -> List[JsonDict]: - return await self._account_data.filter(events) + async def filter_global_account_data( + self, events: Iterable[JsonDict] + ) -> List[JsonDict]: + return await self._global_account_data_filter.filter(events) async def filter_room_state(self, events: Iterable[EventBase]) -> List[EventBase]: return await self._room_state_filter.filter( @@ -279,7 +285,7 @@ async def filter_room_ephemeral(self, events: Iterable[JsonDict]) -> List[JsonDi async def filter_room_account_data( self, events: 
Iterable[JsonDict] ) -> List[JsonDict]: - return await self._room_account_data.filter( + return await self._room_account_data_filter.filter( await self._room_filter.filter(events) ) @@ -292,6 +298,13 @@ def blocks_all_presence(self) -> bool: or self._presence_filter.filters_all_senders() ) + def blocks_all_global_account_data(self) -> bool: + """True if all global account data will be filtered out.""" + return ( + self._global_account_data_filter.filters_all_types() + or self._global_account_data_filter.filters_all_senders() + ) + def blocks_all_room_ephemeral(self) -> bool: return ( self._room_ephemeral_filter.filters_all_types() @@ -299,6 +312,13 @@ def blocks_all_room_ephemeral(self) -> bool: or self._room_ephemeral_filter.filters_all_rooms() ) + def blocks_all_room_account_data(self) -> bool: + return ( + self._room_account_data_filter.filters_all_types() + or self._room_account_data_filter.filters_all_senders() + or self._room_account_data_filter.filters_all_rooms() + ) + def blocks_all_room_timeline(self) -> bool: return ( self._room_timeline_filter.filters_all_types() diff --git a/synapse/app/_base.py b/synapse/app/_base.py index a5aa2185a28e..28062dd69d3a 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -213,7 +213,7 @@ def handle_startup_exception(e: Exception) -> NoReturn: def redirect_stdio_to_logs() -> None: streams = [("stdout", LogLevel.info), ("stderr", LogLevel.error)] - for (stream, level) in streams: + for stream, level in streams: oldStream = getattr(sys, stream) loggingFile = LoggingFile( logger=twisted.logger.Logger(namespace=stream), diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index fe7afb94755e..b05fe2c589c4 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -17,7 +17,7 @@ import os import sys import tempfile -from typing import List, Optional +from typing import List, Mapping, Optional from twisted.internet import defer, task @@ -44,6 +44,7 @@ ) from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.databases.main.filtering import FilteringWorkerStore +from synapse.storage.databases.main.media_repository import MediaRepositoryStore from synapse.storage.databases.main.profile import ProfileWorkerStore from synapse.storage.databases.main.push_rule import PushRulesWorkerStore from synapse.storage.databases.main.receipts import ReceiptsWorkerStore @@ -86,6 +87,7 @@ class AdminCmdSlavedStore( RegistrationWorkerStore, RoomWorkerStore, ProfileWorkerStore, + MediaRepositoryStore, ): def __init__( self, @@ -149,7 +151,7 @@ def write_events(self, room_id: str, events: List[EventBase]) -> None: with open(events_file, "a") as f: for event in events: - print(json.dumps(event.get_pdu_json()), file=f) + json.dump(event.get_pdu_json(), fp=f) def write_state( self, room_id: str, event_id: str, state: StateMap[EventBase] @@ -162,7 +164,7 @@ def write_state( with open(event_file, "a") as f: for event in state.values(): - print(json.dumps(event.get_pdu_json()), file=f) + json.dump(event.get_pdu_json(), fp=f) def write_invite( self, room_id: str, event: EventBase, state: StateMap[EventBase] @@ -178,7 +180,7 @@ def write_invite( with open(invite_state, "a") as f: for event in state.values(): - print(json.dumps(event), file=f) + json.dump(event, fp=f) def write_knock( self, room_id: str, event: EventBase, state: StateMap[EventBase] @@ -194,7 +196,7 @@ def write_knock( with open(knock_state, "a") as f: for event in state.values(): - print(json.dumps(event), file=f) + json.dump(event, fp=f) def
write_profile(self, profile: JsonDict) -> None: user_directory = os.path.join(self.base_directory, "user_data") @@ -202,7 +204,7 @@ def write_profile(self, profile: JsonDict) -> None: profile_file = os.path.join(user_directory, "profile") with open(profile_file, "a") as f: - print(json.dumps(profile), file=f) + json.dump(profile, fp=f) def write_devices(self, devices: List[JsonDict]) -> None: user_directory = os.path.join(self.base_directory, "user_data") @@ -211,7 +213,7 @@ def write_devices(self, devices: List[JsonDict]) -> None: for device in devices: with open(device_file, "a") as f: - print(json.dumps(device), file=f) + json.dump(device, fp=f) def write_connections(self, connections: List[JsonDict]) -> None: user_directory = os.path.join(self.base_directory, "user_data") @@ -220,7 +222,28 @@ def write_connections(self, connections: List[JsonDict]) -> None: for connection in connections: with open(connection_file, "a") as f: - print(json.dumps(connection), file=f) + json.dump(connection, fp=f) + + def write_account_data( + self, file_name: str, account_data: Mapping[str, JsonDict] + ) -> None: + account_data_directory = os.path.join( + self.base_directory, "user_data", "account_data" + ) + os.makedirs(account_data_directory, exist_ok=True) + + account_data_file = os.path.join(account_data_directory, file_name) + + with open(account_data_file, "a") as f: + json.dump(account_data, fp=f) + + def write_media_id(self, media_id: str, media_metadata: JsonDict) -> None: + file_directory = os.path.join(self.base_directory, "media_ids") + os.makedirs(file_directory, exist_ok=True) + media_id_file = os.path.join(file_directory, media_id) + + with open(media_id_file, "w") as f: + json.dump(media_metadata, fp=f) def finished(self) -> str: return self.base_directory diff --git a/synapse/app/complement_fork_starter.py b/synapse/app/complement_fork_starter.py index 920538f44df2..c8dc3f9d76a1 100644 --- a/synapse/app/complement_fork_starter.py +++ b/synapse/app/complement_fork_starter.py @@ -219,7 +219,7 @@ def handle_signal(signum: int, frame: Optional[FrameType]) -> None: # memory space and don't need to repeat the work of loading the code! # Instead of using fork() directly, we use the multiprocessing library, # which uses fork() on Unix platforms. - for (func, worker_args) in zip(worker_functions, args_by_worker): + for func, worker_args in zip(worker_functions, args_by_worker): process = multiprocessing.Process( target=_worker_entrypoint, args=(func, proxy_reactor, worker_args) ) diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index a9462c9c5262..3c4100bf7129 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -159,7 +159,6 @@ class GenericWorkerServer(HomeServer): DATASTORE_CLASS = GenericWorkerSlavedStore # type: ignore def _listen_http(self, listener_config: ListenerConfig) -> None: - assert listener_config.http_options is not None # We always include a health resource. diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 6176a70eb2a0..b8830b1a9c16 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -321,7 +321,6 @@ def setup(config_options: List[str]) -> SynapseHomeServer: and not config.registration.registrations_require_3pid and not config.registration.registration_requires_token ): - raise ConfigError( "You have enabled open registration without any verification. This is a known vector for " "spam and abuse. 
If you would like to allow public registration, please consider adding email, " diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py index 53db1e85b3d8..897dd3edac3e 100644 --- a/synapse/app/phone_stats_home.py +++ b/synapse/app/phone_stats_home.py @@ -15,7 +15,7 @@ import math import resource import sys -from typing import TYPE_CHECKING, List, Sized, Tuple +from typing import TYPE_CHECKING, List, Mapping, Sized, Tuple from prometheus_client import Gauge @@ -194,7 +194,7 @@ def performance_stats_init() -> None: @wrap_as_background_process("generate_monthly_active_users") async def generate_monthly_active_users() -> None: current_mau_count = 0 - current_mau_count_by_service = {} + current_mau_count_by_service: Mapping[str, int] = {} reserved_users: Sized = () store = hs.get_datastores().main if hs.config.server.limit_usage_by_mau or hs.config.server.mau_stats_only: diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 1a6f69e7d3f3..4812fb44961f 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -266,6 +266,19 @@ async def _get() -> Optional[JsonDict]: key = (service.id, protocol) return await self.protocol_meta_cache.wrap(key, _get) + async def ping(self, service: "ApplicationService", txn_id: Optional[str]) -> None: + # The caller should check that url is set + assert service.url is not None, "ping called without URL being set" + + # This is required by the configuration. + assert service.hs_token is not None + + await self.post_json_get_json( + uri=service.url + "/_matrix/app/unstable/fi.mau.msc2659/ping", + post_json={"transaction_id": txn_id}, + headers={"Authorization": [f"Bearer {service.hs_token}"]}, + ) + async def push_bulk( self, service: "ApplicationService", diff --git a/synapse/config/consent.py b/synapse/config/consent.py index be74609dc42e..5bfd0cbb7163 100644 --- a/synapse/config/consent.py +++ b/synapse/config/consent.py @@ -22,7 +22,6 @@ class ConsentConfig(Config): - section = "consent" def __init__(self, *args: Any): diff --git a/synapse/config/database.py b/synapse/config/database.py index 928fec8dfe1b..596d8769fe3c 100644 --- a/synapse/config/database.py +++ b/synapse/config/database.py @@ -154,7 +154,6 @@ def read_arguments(self, args: argparse.Namespace) -> None: logger.warning(NON_SQLITE_DATABASE_PATH_WARNING) def set_databasepath(self, database_path: str) -> None: - if database_path != ":memory:": database_path = self.abspath(database_path) diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 12d6f51d12b8..e9442b17bd7b 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -187,3 +187,9 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: self.msc3958_supress_edit_notifs = experimental.get( "msc3958_supress_edit_notifs", False ) + + # MSC3967: Do not require UIA when first uploading cross signing keys + self.msc3967_enabled = experimental.get("msc3967_enabled", False) + + # MSC2659: Application service ping endpoint + self.msc2659_enabled = experimental.get("msc2659_enabled", False) diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index 4d2b298a70be..c205a78039b6 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -56,7 +56,6 @@ class HomeServerConfig(RootConfig): - config_classes = [ ModulesConfig, ServerConfig, diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py index 5c13fe428a70..a5514e70a21d 100644 --- a/synapse/config/ratelimiting.py +++ 
b/synapse/config/ratelimiting.py @@ -46,7 +46,6 @@ class RatelimitConfig(Config): section = "ratelimiting" def read_config(self, config: JsonDict, **kwargs: Any) -> None: - # Load the new-style messages config if it exists. Otherwise fall back # to the old method. if "rc_message" in config: @@ -87,9 +86,18 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: defaults={"per_second": 0.1, "burst_count": 5}, ) + # It is reasonable to login with a bunch of devices at once (i.e. when + # setting up an account), but it is *not* valid to continually be + # logging into new devices. rc_login_config = config.get("rc_login", {}) - self.rc_login_address = RatelimitSettings(rc_login_config.get("address", {})) - self.rc_login_account = RatelimitSettings(rc_login_config.get("account", {})) + self.rc_login_address = RatelimitSettings( + rc_login_config.get("address", {}), + defaults={"per_second": 0.003, "burst_count": 5}, + ) + self.rc_login_account = RatelimitSettings( + rc_login_config.get("account", {}), + defaults={"per_second": 0.003, "burst_count": 5}, + ) self.rc_login_failed_attempts = RatelimitSettings( rc_login_config.get("failed_attempts", {}) ) diff --git a/synapse/config/redis.py b/synapse/config/redis.py index ac93f6c6089d..17e4bf42d682 100644 --- a/synapse/config/redis.py +++ b/synapse/config/redis.py @@ -33,6 +33,7 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: self.redis_host = redis_config.get("host", "localhost") self.redis_port = redis_config.get("port", 6379) + self.redis_dbid = redis_config.get("dbid", None) self.redis_password = redis_config.get("password") cache_shard_config = redis_config.get("cache_shards", {}) diff --git a/synapse/config/repository.py b/synapse/config/repository.py index e4759711ed95..ecb3edbe3ade 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -116,7 +116,6 @@ class ContentRepositoryConfig(Config): section = "media" def read_config(self, config: JsonDict, **kwargs: Any) -> None: - # Only enable the media repo if either the media repo is enabled or the # current worker app is the media repo. if ( @@ -179,11 +178,13 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: for i, provider_config in enumerate(storage_providers): # We special case the module "file_system" so as not to need to # expose FileStorageProviderBackend - if provider_config["module"] == "file_system": - provider_config["module"] = ( - "synapse.rest.media.v1.storage_provider" - ".FileStorageProviderBackend" - ) + if ( + provider_config["module"] == "file_system" + or provider_config["module"] == "synapse.rest.media.v1.storage_provider" + ): + provider_config[ + "module" + ] = "synapse.media.storage_provider.FileStorageProviderBackend" provider_class, parsed_config = load_module( provider_config, ("media_storage_providers", "" % i) diff --git a/synapse/config/room_directory.py b/synapse/config/room_directory.py index 3ed236217fd4..8666c22f010d 100644 --- a/synapse/config/room_directory.py +++ b/synapse/config/room_directory.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Any, List +from typing import Any, Collection from matrix_common.regex import glob_to_regex @@ -70,7 +70,7 @@ def is_alias_creation_allowed(self, user_id: str, room_id: str, alias: str) -> b return False def is_publishing_room_allowed( - self, user_id: str, room_id: str, aliases: List[str] + self, user_id: str, room_id: str, aliases: Collection[str] ) -> bool: """Checks if the given user is allowed to publish the room @@ -122,7 +122,7 @@ def __init__(self, option_name: str, rule: JsonDict): except Exception as e: raise ConfigError("Failed to parse glob into regex") from e - def matches(self, user_id: str, room_id: str, aliases: List[str]) -> bool: + def matches(self, user_id: str, room_id: str, aliases: Collection[str]) -> bool: """Tests if this rule matches the given user_id, room_id and aliases. Args: diff --git a/synapse/config/server.py b/synapse/config/server.py index ecdaa2d9dd24..0e46b849cf06 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -177,6 +177,7 @@ def generate_ip_set( "client", "consent", "federation", + "health", "keys", "media", "metrics", @@ -734,7 +735,6 @@ def generate_config_section( listeners: Optional[List[dict]], **kwargs: Any, ) -> str: - _, bind_port = parse_and_validate_server_name(server_name) if bind_port is not None: unsecure_port = bind_port - 400 diff --git a/synapse/config/tls.py b/synapse/config/tls.py index 336fe3e0daaf..318270ebb8ac 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -30,7 +30,6 @@ class TlsConfig(Config): section = "tls" def read_config(self, config: JsonDict, **kwargs: Any) -> None: - self.tls_certificate_file = self.abspath(config.get("tls_certificate_path")) self.tls_private_key_file = self.abspath(config.get("tls_private_key_path")) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 86cd4af9bd5a..d710607c6367 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -399,7 +399,7 @@ async def _inner_fetch_key_requests( # We now convert the returned list of results into a map from server # name to key ID to FetchKeyResult, to return. 
to_return: Dict[str, Dict[str, FetchKeyResult]] = {} - for (request, results) in zip(deduped_requests, results_per_request): + for request, results in zip(deduped_requests, results_per_request): to_return_by_server = to_return.setdefault(request.server_name, {}) for key_id, key_result in results.items(): existing = to_return_by_server.get(key_id) diff --git a/synapse/event_auth.py b/synapse/event_auth.py index e0be9f88cc9d..af55874b5c47 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -16,18 +16,7 @@ import collections.abc import logging import typing -from typing import ( - Any, - Collection, - Dict, - Iterable, - List, - Mapping, - Optional, - Set, - Tuple, - Union, -) +from typing import Any, Dict, Iterable, List, Mapping, Optional, Set, Tuple, Union from canonicaljson import encode_canonical_json from signedjson.key import decode_verify_key_bytes @@ -56,7 +45,13 @@ RoomVersions, ) from synapse.storage.databases.main.events_worker import EventRedactBehaviour -from synapse.types import MutableStateMap, StateMap, UserID, get_domain_from_id +from synapse.types import ( + MutableStateMap, + StateMap, + StrCollection, + UserID, + get_domain_from_id, +) if typing.TYPE_CHECKING: # conditional imports to avoid import cycle @@ -69,7 +64,7 @@ class _EventSourceStore(Protocol): async def get_events( self, - event_ids: Collection[str], + event_ids: StrCollection, redact_behaviour: EventRedactBehaviour, get_prev_content: bool = False, allow_rejected: bool = False, @@ -173,13 +168,24 @@ async def check_state_independent_auth_rules( return # 2. Reject if event has auth_events that: ... - auth_events = await store.get_events( - event.auth_event_ids(), - redact_behaviour=EventRedactBehaviour.as_is, - allow_rejected=True, - ) if batched_auth_events: - auth_events.update(batched_auth_events) + # Copy the batched auth events to avoid mutating them. + auth_events = dict(batched_auth_events) + needed_auth_event_ids = set(event.auth_event_ids()) - batched_auth_events.keys() + if needed_auth_event_ids: + auth_events.update( + await store.get_events( + needed_auth_event_ids, + redact_behaviour=EventRedactBehaviour.as_is, + allow_rejected=True, + ) + ) + else: + auth_events = await store.get_events( + event.auth_event_ids(), + redact_behaviour=EventRedactBehaviour.as_is, + allow_rejected=True, + ) room_id = event.room_id auth_dict: MutableStateMap[str] = {} diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 8aca9a3ab9e9..91118a8d847f 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -39,7 +39,7 @@ from synapse.api.constants import RelationTypes from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions -from synapse.types import JsonDict, RoomStreamToken +from synapse.types import JsonDict, RoomStreamToken, StrCollection from synapse.util.caches import intern_dict from synapse.util.frozenutils import freeze from synapse.util.stringutils import strtobool @@ -413,7 +413,7 @@ def prev_event_ids(self) -> Sequence[str]: """ return [e for e, _ in self._dict["prev_events"]] - def auth_event_ids(self) -> Sequence[str]: + def auth_event_ids(self) -> StrCollection: """Returns the list of auth event IDs. The order matches the order specified in the event, though there is no meaning to it. @@ -558,7 +558,7 @@ def prev_event_ids(self) -> Sequence[str]: """ return self._dict["prev_events"] - def auth_event_ids(self) -> Sequence[str]: + def auth_event_ids(self) -> StrCollection: """Returns the list of auth event IDs. 
The order matches the order specified in the event, though there is no meaning to it. diff --git a/synapse/events/builder.py b/synapse/events/builder.py index 94dd1298e177..c82745275f94 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, Any, Collection, Dict, List, Optional, Tuple, Union import attr from signedjson.types import SigningKey @@ -103,7 +103,7 @@ def is_state(self) -> bool: async def build( self, - prev_event_ids: List[str], + prev_event_ids: Collection[str], auth_event_ids: Optional[List[str]], depth: Optional[int] = None, ) -> EventBase: @@ -136,7 +136,7 @@ async def build( format_version = self.room_version.event_format # The types of auth/prev events changes between event versions. - prev_events: Union[List[str], List[Tuple[str, Dict[str, str]]]] + prev_events: Union[Collection[str], List[Tuple[str, Dict[str, str]]]] auth_events: Union[List[str], List[Tuple[str, Dict[str, str]]]] if format_version == EventFormatVersions.ROOM_V1_V2: auth_events = await self._store.add_event_hashes(auth_event_ids) diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index 6eaef8b57a03..c04ad08cbb61 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from abc import ABC, abstractmethod from typing import TYPE_CHECKING, List, Optional, Tuple import attr @@ -22,12 +23,56 @@ if TYPE_CHECKING: from synapse.storage.controllers import StorageControllers + from synapse.storage.databases import StateGroupDataStore from synapse.storage.databases.main import DataStore from synapse.types.state import StateFilter +class UnpersistedEventContextBase(ABC): + """ + This is a base class for EventContext and UnpersistedEventContext, objects which + hold information relevant to storing an associated event. Note that an + UnpersistedEventContexts must be converted into an EventContext before it is + suitable to send to the db with its associated event. + + Attributes: + _storage: storage controllers for interfacing with the database + app_service: If the associated event is being sent by a (local) application service, that + app service. + """ + + def __init__(self, storage_controller: "StorageControllers"): + self._storage: "StorageControllers" = storage_controller + self.app_service: Optional[ApplicationService] = None + + @abstractmethod + async def persist( + self, + event: EventBase, + ) -> "EventContext": + """ + A method to convert an UnpersistedEventContext to an EventContext, suitable for + sending to the database with the associated event. + """ + pass + + @abstractmethod + async def get_prev_state_ids( + self, state_filter: Optional["StateFilter"] = None + ) -> StateMap[str]: + """ + Gets the room state at the event (ie not including the event if the event is a + state event). 
+ + Args: + state_filter: specifies the type of state event to fetch from DB, example: + EventTypes.JoinRules + """ + pass + + @attr.s(slots=True, auto_attribs=True) -class EventContext: +class EventContext(UnpersistedEventContextBase): """ Holds information relevant to persisting an event @@ -77,9 +122,6 @@ class EventContext: delta_ids: If ``prev_group`` is not None, the state delta between ``prev_group`` and ``state_group``. - app_service: If this event is being sent by a (local) application service, that - app service. - partial_state: if True, we may be storing this event with a temporary, incomplete state. """ @@ -122,6 +164,9 @@ def for_outlier( """Return an EventContext instance suitable for persisting an outlier event""" return EventContext(storage=storage) + async def persist(self, event: EventBase) -> "EventContext": + return self + async def serialize(self, event: EventBase, store: "DataStore") -> JsonDict: """Converts self to a type that can be serialized as JSON, and then deserialized by `deserialize` @@ -248,11 +293,182 @@ async def get_prev_state_ids( Maps a (type, state_key) to the event ID of the state event matching this tuple. """ + + assert self.state_group_before_event is not None + return await self._storage.state.get_state_ids_for_group( + self.state_group_before_event, state_filter + ) + + +@attr.s(slots=True, auto_attribs=True) +class UnpersistedEventContext(UnpersistedEventContextBase): + """ + The event context holds information about the state groups for an event. It is important + to remember that an event technically has two state groups: the state group before the + event, and the state group after the event. If the event is not a state event, the state + group will not change (ie the state group before the event will be the same as the state + group after the event), but if it is a state event the state group before the event + will differ from the state group after the event. + This is a version of an EventContext before the new state group (if any) has been + computed and stored. It contains information about the state before the event (which + also may be the information after the event, if the event is not a state event). The + UnpersistedEventContext must be converted into an EventContext by calling the method + 'persist' on it before it is suitable to be sent to the DB for processing. + + state_group_after_event: + The state group after the event. This will always be None until it is persisted. + If the event is not a state event, this will be the same as + state_group_before_event. + + state_group_before_event: + The ID of the state group representing the state of the room before this event. + + state_delta_due_to_event: + If the event is a state event, then this is the delta of the state between + `state_group` and `state_group_before_event` + + prev_group_for_state_group_before_event: + If it is known, ``state_group_before_event``'s previous state group. + + delta_ids_to_state_group_before_event: + If ``prev_group_for_state_group_before_event`` is not None, the state delta + between ``prev_group_for_state_group_before_event`` and ``state_group_before_event``. + + partial_state: + Whether the event has partial state. + + state_map_before_event: + A map of the state before the event, i.e. 
the state at `state_group_before_event` + """ + + _storage: "StorageControllers" + state_group_before_event: Optional[int] + state_group_after_event: Optional[int] + state_delta_due_to_event: Optional[dict] + prev_group_for_state_group_before_event: Optional[int] + delta_ids_to_state_group_before_event: Optional[StateMap[str]] + partial_state: bool + state_map_before_event: Optional[StateMap[str]] = None + + @classmethod + async def batch_persist_unpersisted_contexts( + cls, + events_and_context: List[Tuple[EventBase, "UnpersistedEventContextBase"]], + room_id: str, + last_known_state_group: int, + datastore: "StateGroupDataStore", + ) -> List[Tuple[EventBase, EventContext]]: + """ + Takes a list of events and their associated unpersisted contexts and persists + the unpersisted contexts, returning a list of events and persisted contexts. + Note that all the events must be in a linear chain (ie a <- b <- c). + + Args: + events_and_context: A list of events and their unpersisted contexts + room_id: the room_id for the events + last_known_state_group: the last persisted state group + datastore: a state datastore + """ + amended_events_and_context = await datastore.store_state_deltas_for_batched( + events_and_context, room_id, last_known_state_group + ) + + events_and_persisted_context = [] + for event, unpersisted_context in amended_events_and_context: + if event.is_state(): + context = EventContext( + storage=unpersisted_context._storage, + state_group=unpersisted_context.state_group_after_event, + state_group_before_event=unpersisted_context.state_group_before_event, + state_delta_due_to_event=unpersisted_context.state_delta_due_to_event, + partial_state=unpersisted_context.partial_state, + prev_group=unpersisted_context.state_group_before_event, + delta_ids=unpersisted_context.state_delta_due_to_event, + ) + else: + context = EventContext( + storage=unpersisted_context._storage, + state_group=unpersisted_context.state_group_after_event, + state_group_before_event=unpersisted_context.state_group_before_event, + state_delta_due_to_event=unpersisted_context.state_delta_due_to_event, + partial_state=unpersisted_context.partial_state, + prev_group=unpersisted_context.prev_group_for_state_group_before_event, + delta_ids=unpersisted_context.delta_ids_to_state_group_before_event, + ) + events_and_persisted_context.append((event, context)) + return events_and_persisted_context + + async def get_prev_state_ids( + self, state_filter: Optional["StateFilter"] = None + ) -> StateMap[str]: + """ + Gets the room state map, excluding this event. + + Args: + state_filter: specifies the type of state event to fetch from DB + + Returns: + Maps a (type, state_key) to the event ID of the state event matching + this tuple. + """ + if self.state_map_before_event: + return self.state_map_before_event + assert self.state_group_before_event is not None return await self._storage.state.get_state_ids_for_group( self.state_group_before_event, state_filter ) + async def persist(self, event: EventBase) -> EventContext: + """ + Creates a full `EventContext` for the event, persisting any referenced state that + has not yet been persisted. + + Args: + event: event that the EventContext is associated with. 
+ + Returns: An EventContext suitable for sending to the database with the event + for persisting + """ + assert self.partial_state is not None + + # If we have a full set of state for before the event but don't have a state + # group for that state, we need to get one + if self.state_group_before_event is None: + assert self.state_map_before_event + state_group_before_event = await self._storage.state.store_state_group( + event.event_id, + event.room_id, + prev_group=self.prev_group_for_state_group_before_event, + delta_ids=self.delta_ids_to_state_group_before_event, + current_state_ids=self.state_map_before_event, + ) + self.state_group_before_event = state_group_before_event + + # if the event isn't a state event the state group doesn't change + if not self.state_delta_due_to_event: + state_group_after_event = self.state_group_before_event + + # otherwise if it is a state event we need to get a state group for it + else: + state_group_after_event = await self._storage.state.store_state_group( + event.event_id, + event.room_id, + prev_group=self.state_group_before_event, + delta_ids=self.state_delta_due_to_event, + current_state_ids=None, + ) + + return EventContext.with_state( + storage=self._storage, + state_group=state_group_after_event, + state_group_before_event=self.state_group_before_event, + state_delta_due_to_event=self.state_delta_due_to_event, + partial_state=self.partial_state, + prev_group=self.state_group_before_event, + delta_ids=self.state_delta_due_to_event, + ) + def _encode_state_dict( state_dict: Optional[StateMap[str]], diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index 623a2c71eadb..765c15bb5135 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -33,8 +33,8 @@ import synapse from synapse.api.errors import Codes from synapse.logging.opentracing import trace -from synapse.rest.media.v1._base import FileInfo -from synapse.rest.media.v1.media_storage import ReadableFileWrapper +from synapse.media._base import FileInfo +from synapse.media.media_storage import ReadableFileWrapper from synapse.spam_checker_api import RegistrationBehaviour from synapse.types import JsonDict, RoomAlias, UserProfile from synapse.util.async_helpers import delay_cancellation, maybe_awaitable diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index 72ab69689887..61d4530be784 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -18,7 +18,7 @@ from synapse.api.errors import ModuleFailedException, SynapseError from synapse.events import EventBase -from synapse.events.snapshot import EventContext +from synapse.events.snapshot import UnpersistedEventContextBase from synapse.storage.roommember import ProfileInfo from synapse.types import Requester, StateMap from synapse.util.async_helpers import delay_cancellation, maybe_awaitable @@ -45,6 +45,8 @@ ON_PROFILE_UPDATE_CALLBACK = Callable[[str, ProfileInfo, bool, bool], Awaitable] ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK = Callable[[str, bool, bool], Awaitable] ON_THREEPID_BIND_CALLBACK = Callable[[str, str, str], Awaitable] +ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK = Callable[[str, str, str], Awaitable] +ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK = Callable[[str, str, str], Awaitable] def load_legacy_third_party_event_rules(hs: "HomeServer") -> None: @@ -78,7 +80,6 @@ def async_wrapper(f: Optional[Callable]) -> Optional[Callable[..., Awaitable]]: # correctly, we need to await its result. 
Therefore it doesn't make a lot of # sense to make it go through the run() wrapper. if f.__name__ == "check_event_allowed": - # We need to wrap check_event_allowed because its old form would return either # a boolean or a dict, but now we want to return the dict separately from the # boolean. @@ -100,7 +101,6 @@ async def wrap_check_event_allowed( return wrap_check_event_allowed if f.__name__ == "on_create_room": - # We need to wrap on_create_room because its old form would return a boolean # if the room creation is denied, but now we just want it to raise an # exception. @@ -174,6 +174,12 @@ def __init__(self, hs: "HomeServer"): ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK ] = [] self._on_threepid_bind_callbacks: List[ON_THREEPID_BIND_CALLBACK] = [] + self._on_add_user_third_party_identifier_callbacks: List[ + ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK + ] = [] + self._on_remove_user_third_party_identifier_callbacks: List[ + ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK + ] = [] def register_third_party_rules_callbacks( self, @@ -193,6 +199,12 @@ def register_third_party_rules_callbacks( ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK ] = None, on_threepid_bind: Optional[ON_THREEPID_BIND_CALLBACK] = None, + on_add_user_third_party_identifier: Optional[ + ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK + ] = None, + on_remove_user_third_party_identifier: Optional[ + ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK + ] = None, ) -> None: """Register callbacks from modules for each hook.""" if check_event_allowed is not None: @@ -230,8 +242,20 @@ def register_third_party_rules_callbacks( if on_threepid_bind is not None: self._on_threepid_bind_callbacks.append(on_threepid_bind) + if on_add_user_third_party_identifier is not None: + self._on_add_user_third_party_identifier_callbacks.append( + on_add_user_third_party_identifier + ) + + if on_remove_user_third_party_identifier is not None: + self._on_remove_user_third_party_identifier_callbacks.append( + on_remove_user_third_party_identifier + ) + async def check_event_allowed( - self, event: EventBase, context: EventContext + self, + event: EventBase, + context: UnpersistedEventContextBase, ) -> Tuple[bool, Optional[dict]]: """Check if a provided event should be allowed in the given context. @@ -511,6 +535,9 @@ async def on_threepid_bind(self, user_id: str, medium: str, address: str) -> Non local homeserver, not when it's created on an identity server (and then kept track of so that it can be unbound on the same IS later on). + THIS MODULE CALLBACK METHOD HAS BEEN DEPRECATED. Please use the + `on_add_user_third_party_identifier` callback method instead. + Args: user_id: the user being associated with the threepid. medium: the threepid's medium. @@ -523,3 +550,44 @@ async def on_threepid_bind(self, user_id: str, medium: str, address: str) -> Non logger.exception( "Failed to run module API callback %s: %s", callback, e ) + + async def on_add_user_third_party_identifier( + self, user_id: str, medium: str, address: str + ) -> None: + """Called when an association between a user's Matrix ID and a third-party ID + (email, phone number) has successfully been registered on the homeserver. + + Args: + user_id: The User ID included in the association. + medium: The medium of the third-party ID (email, msisdn). + address: The address of the third-party ID (i.e. an email address). 
+ """ + for callback in self._on_add_user_third_party_identifier_callbacks: + try: + await callback(user_id, medium, address) + except Exception as e: + logger.exception( + "Failed to run module API callback %s: %s", callback, e + ) + + async def on_remove_user_third_party_identifier( + self, user_id: str, medium: str, address: str + ) -> None: + """Called when an association between a user's Matrix ID and a third-party ID + (email, phone number) has been successfully removed on the homeserver. + + This is called *after* any known bindings on identity servers for this + association have been removed. + + Args: + user_id: The User ID included in the removed association. + medium: The medium of the third-party ID (email, msisdn). + address: The address of the third-party ID (i.e. an email address). + """ + for callback in self._on_remove_user_third_party_identifier_callbacks: + try: + await callback(user_id, medium, address) + except Exception as e: + logger.exception( + "Failed to run module API callback %s: %s", callback, e + ) diff --git a/synapse/events/utils.py b/synapse/events/utils.py index caf6ba0605af..1ff05cfdc5da 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -38,8 +38,7 @@ ) from synapse.api.errors import Codes, SynapseError from synapse.api.room_versions import RoomVersion -from synapse.types import JsonDict -from synapse.util.frozenutils import unfreeze +from synapse.types import JsonDict, Requester from . import EventBase @@ -317,8 +316,9 @@ class SerializeEventConfig: as_client_event: bool = True # Function to convert from federation format to client format event_format: Callable[[JsonDict], JsonDict] = format_event_for_client_v1 - # ID of the user's auth token - used for namespacing of transaction IDs - token_id: Optional[int] = None + # The entity that requested the event. This is used to determine whether to include + # the transaction_id in the unsigned section of the event. + requester: Optional[Requester] = None # List of event fields to include. If empty, all fields will be returned. only_event_fields: Optional[List[str]] = None # Some events can have stripped room state stored in the `unsigned` field. @@ -368,11 +368,24 @@ def serialize_event( e.unsigned["redacted_because"], time_now_ms, config=config ) - if config.token_id is not None: - if config.token_id == getattr(e.internal_metadata, "token_id", None): - txn_id = getattr(e.internal_metadata, "txn_id", None) - if txn_id is not None: - d["unsigned"]["transaction_id"] = txn_id + # If we have a txn_id saved in the internal_metadata, we should include it in the + # unsigned section of the event if it was sent by the same session as the one + # requesting the event. + # There is a special case for guests, because they only have one access token + # without associated access_token_id, so we always include the txn_id for events + # they sent. 
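# Illustrative only (example values and a hypothetical transaction ID): under the
# rule described above, the session that sent an event via
# `PUT /_matrix/client/v3/rooms/{roomId}/send/m.room.message/abc123` sees the
# transaction ID echoed back in the event's unsigned section, while other sessions
# and other users do not.
event_as_seen_by_sending_session = {
    "type": "m.room.message",
    "sender": "@alice:example.org",
    "content": {"msgtype": "m.text", "body": "hi"},
    "unsigned": {"transaction_id": "abc123"},
}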
+ txn_id = getattr(e.internal_metadata, "txn_id", None) + if txn_id is not None and config.requester is not None: + event_token_id = getattr(e.internal_metadata, "token_id", None) + if config.requester.user.to_string() == e.sender and ( + ( + event_token_id is not None + and config.requester.access_token_id is not None + and event_token_id == config.requester.access_token_id + ) + or config.requester.is_guest + ): + d["unsigned"]["transaction_id"] = txn_id # Beeper: include internal stream ordering as HS order unsigned hint stream_ordering = getattr(e.internal_metadata, "stream_ordering", None) @@ -423,7 +436,6 @@ def serialize_event( *, config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG, bundle_aggregations: Optional[Dict[str, "BundledAggregations"]] = None, - apply_edits: bool = True, ) -> JsonDict: """Serializes a single event. @@ -433,10 +445,7 @@ def serialize_event( config: Event serialization config bundle_aggregations: A map from event_id to the aggregations to be bundled into the event. - apply_edits: Whether the content of the event should be modified to reflect - any replacement in `bundle_aggregations[].replace`. - See also the `inhibit_replacement_via_edits` constructor arg: if that is - set to True, then this argument is ignored. + Returns: The serialized event """ @@ -455,38 +464,10 @@ def serialize_event( config, bundle_aggregations, serialized_event, - apply_edits=apply_edits, ) return serialized_event - def _apply_edit( - self, orig_event: EventBase, serialized_event: JsonDict, edit: EventBase - ) -> None: - """Replace the content, preserving existing relations of the serialized event. - - Args: - orig_event: The original event. - serialized_event: The original event, serialized. This is modified. - edit: The event which edits the above. - """ - - # Ensure we take copies of the edit content, otherwise we risk modifying - # the original event. - edit_content = edit.content.copy() - - # Unfreeze the event content if necessary, so that we may modify it below - edit_content = unfreeze(edit_content) - serialized_event["content"] = edit_content.get("m.new_content", {}) - - # Check for existing relations - relates_to = orig_event.content.get("m.relates_to") - if relates_to: - # Keep the relations, ensuring we use a dict copy of the original - serialized_event["content"]["m.relates_to"] = relates_to.copy() - else: - serialized_event["content"].pop("m.relates_to", None) - def _inject_bundled_aggregations( self, event: EventBase, @@ -494,7 +475,6 @@ def _inject_bundled_aggregations( config: SerializeEventConfig, bundled_aggregations: Dict[str, "BundledAggregations"], serialized_event: JsonDict, - apply_edits: bool, ) -> None: """Potentially injects bundled aggregations into the unsigned portion of the serialized event. @@ -509,9 +489,6 @@ def _inject_bundled_aggregations( While serializing the bundled aggregations this map may be searched again for additional events in a recursive manner. serialized_event: The serialized event which may be modified. - apply_edits: Whether the content of the event should be modified to reflect - any replacement in `aggregations.replace` (subject to the - `inhibit_replacement_via_edits` constructor arg). """ # We have already checked that aggregations exist for this event. @@ -521,22 +498,12 @@ def _inject_bundled_aggregations( # being serialized. 
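# Sketch of the shape this code now produces for an edited event (example values,
# assumed field layout): the original content is served unmodified, and the full
# edit event is bundled under unsigned["m.relations"]["m.replace"] for clients to
# apply themselves.
serialized_edited_event = {
    "type": "m.room.message",
    "content": {"msgtype": "m.text", "body": "original text"},
    "unsigned": {
        "m.relations": {
            "m.replace": {
                "type": "m.room.message",
                "sender": "@alice:example.org",
                "content": {
                    "msgtype": "m.text",
                    "body": "* edited text",
                    "m.new_content": {"msgtype": "m.text", "body": "edited text"},
                },
            },
        },
    },
}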
serialized_aggregations = {} - if event_aggregations.annotations: - serialized_aggregations[ - RelationTypes.ANNOTATION - ] = event_aggregations.annotations - if event_aggregations.references: serialized_aggregations[ RelationTypes.REFERENCE ] = event_aggregations.references if event_aggregations.replace: - # If there is an edit, optionally apply it to the event. - edit = event_aggregations.replace - if apply_edits and not self._inhibit_replacement_via_edits: - self._apply_edit(event, serialized_event, edit) - # Include information about it in the relations dict. # # Matrix spec v1.5 (https://spec.matrix.org/v1.5/client-server-api/#server-side-aggregation-of-mreplace-relationships) @@ -544,10 +511,7 @@ def _inject_bundled_aggregations( # `sender` of the edit; however MSC3925 proposes extending it to the whole # of the edit, which is what we do here. serialized_aggregations[RelationTypes.REPLACE] = self.serialize_event( - edit, - time_now, - config=config, - apply_edits=False, + event_aggregations.replace, time_now, config=config ) # Include any threaded replies to this event. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 0ac85a3be717..7d04560dca78 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -884,7 +884,7 @@ async def _try_destination_list( if 500 <= e.code < 600: failover = True - elif e.code == 400 and synapse_error.errcode in failover_errcodes: + elif 400 <= e.code < 500 and synapse_error.errcode in failover_errcodes: failover = True elif failover_on_unknown_endpoint and self._is_unknown_endpoint( @@ -999,14 +999,13 @@ async def send_request(destination: str) -> Tuple[str, EventBase, RoomVersion]: return destination, ev, room_version + failover_errcodes = {Codes.NOT_FOUND} # MSC3083 defines additional error codes for room joins. Unfortunately # we do not yet know the room version, assume these will only be returned # by valid room versions. 
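
A hedged restatement of the failover rule after the change above, as a standalone helper (the function name is invented; the real logic is inline in _try_destination_list): retry the next destination on any 5xx response, or on a 4xx response whose Matrix errcode is in the per-request allow-list.

from typing import Collection

def _should_failover_to_next_destination(
    http_status: int, errcode: str, failover_errcodes: Collection[str]
) -> bool:
    # Server-side errors are always worth retrying against another destination.
    if 500 <= http_status < 600:
        return True
    # Client-style errors only fail over for specific, expected errcodes,
    # e.g. M_NOT_FOUND, or the MSC3083 join errcodes when making a join.
    return 400 <= http_status < 500 and errcode in failover_errcodes
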
- failover_errcodes = ( - (Codes.UNABLE_AUTHORISE_JOIN, Codes.UNABLE_TO_GRANT_JOIN) - if membership == Membership.JOIN - else None - ) + if membership == Membership.JOIN: + failover_errcodes.add(Codes.UNABLE_AUTHORISE_JOIN) + failover_errcodes.add(Codes.UNABLE_TO_GRANT_JOIN) return await self._try_destination_list( "make_" + membership, diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 8d36172484d6..6d99845de561 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -23,6 +23,7 @@ Collection, Dict, List, + Mapping, Optional, Tuple, Union, @@ -47,6 +48,7 @@ FederationError, IncompatibleRoomVersionError, NotFoundError, + PartialStateConflictError, SynapseError, UnsupportedRoomVersionError, ) @@ -80,7 +82,6 @@ ReplicationFederationSendEduRestServlet, ReplicationGetQueryRestServlet, ) -from synapse.storage.databases.main.events import PartialStateConflictError from synapse.storage.databases.main.lock import Lock from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary from synapse.storage.roommember import MemberSummary @@ -1512,7 +1513,7 @@ async def on_query(self, query_type: str, args: dict) -> JsonDict: def _get_event_ids_for_partial_state_join( join_event: EventBase, prev_state_ids: StateMap[str], - summary: Dict[str, MemberSummary], + summary: Mapping[str, MemberSummary], ) -> Collection[str]: """Calculate state to be returned in a partial_state send_join diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index d720b5fd3fe2..3063df799099 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -314,7 +314,7 @@ async def get_replication_rows( # stream position. keyed_edus = {v: k for k, v in self.keyed_edu_changed.items()[i:j]} - for ((destination, edu_key), pos) in keyed_edus.items(): + for (destination, edu_key), pos in keyed_edus.items(): rows.append( ( pos, @@ -329,7 +329,7 @@ j = self.edus.bisect_right(to_token) + 1 edus = self.edus.items()[i:j] - for (pos, edu) in edus: + for pos, edu in edus: rows.append((pos, EduRow(edu))) # Sort rows based on pos diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index ffc9d95ee703..31c5c2b7dee2 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -497,8 +497,8 @@ async def _catch_up_transmission_loop(self) -> None: # # Note: `catchup_pdus` will have exactly one PDU per room. for pdu in catchup_pdus: - # The PDU from the DB will be the last PDU in the room from - # *this server* that wasn't sent to the remote. However, other + # The PDU from the DB will be the newest PDU in the room from + # *this server* that we tried---but were unable---to send to the remote. # servers may have sent lots of events since then, and we want # to try and tell the remote only about the *latest* events in # the room. This is so that it doesn't get inundated by events @@ -516,6 +516,11 @@ async def _catch_up_transmission_loop(self) -> None: # If the event is in the extremities, then great! We can just # use that without having to do further checks. room_catchup_pdus = [pdu] + elif await self._store.is_partial_state_room(pdu.room_id): + # We can't be sure which events the destination should + # see using only partial state. Avoid doing so, and just retry + # sending the newest PDU the remote is missing from us.
+ room_catchup_pdus = [pdu] else: # If not, fetch the extremities and figure out which we can # send. @@ -547,6 +552,8 @@ async def _catch_up_transmission_loop(self) -> None: self._server_name, new_pdus, redact=False, + filter_out_erased_senders=True, + filter_out_remote_partial_state_events=True, ) # If we've filtered out all the extremities, fall back to diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py index 67e789eef70e..7e01c18c6c90 100644 --- a/synapse/handlers/account_data.py +++ b/synapse/handlers/account_data.py @@ -155,9 +155,6 @@ async def remove_account_data_for_room( max_stream_id = await self._store.remove_account_data_for_room( user_id, room_id, account_data_type ) - if max_stream_id is None: - # The referenced account data did not exist, so no delete occurred. - return None self._notifier.on_new_event( StreamKeyType.ACCOUNT_DATA, max_stream_id, users=[user_id] @@ -230,9 +227,6 @@ async def remove_account_data_for_user( max_stream_id = await self._store.remove_account_data_for_user( user_id, account_data_type ) - if max_stream_id is None: - # The referenced account data did not exist, so no delete occurred. - return None self._notifier.on_new_event( StreamKeyType.ACCOUNT_DATA, max_stream_id, users=[user_id] @@ -248,7 +242,6 @@ async def remove_account_data_for_user( instance_name=random.choice(self._account_data_writers), user_id=user_id, account_data_type=account_data_type, - content={}, ) return response["max_stream_id"] @@ -343,10 +336,12 @@ async def get_new_events( } ) - ( - account_data, - room_account_data, - ) = await self.store.get_updated_account_data_for_user(user_id, last_stream_id) + account_data = await self.store.get_updated_global_account_data_for_user( + user_id, last_stream_id + ) + room_account_data = await self.store.get_updated_room_account_data_for_user( + user_id, last_stream_id + ) for account_data_type, content in account_data.items(): results.append({"type": account_data_type, "content": content}) diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index 33e45e3a1136..4aa4ebf7e4a8 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -15,9 +15,7 @@ import email.mime.multipart import email.utils import logging -from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional, Tuple - -from twisted.web.http import Request +from typing import TYPE_CHECKING, List, Optional, Tuple from synapse.api.errors import AuthError, StoreError, SynapseError from synapse.metrics.background_process_metrics import wrap_as_background_process @@ -30,25 +28,17 @@ logger = logging.getLogger(__name__) -# Types for callbacks to be registered via the module api -IS_USER_EXPIRED_CALLBACK = Callable[[str], Awaitable[Optional[bool]]] -ON_USER_REGISTRATION_CALLBACK = Callable[[str], Awaitable] -# Temporary hooks to allow for a transition from `/_matrix/client` endpoints -# to `/_synapse/client/account_validity`. See `register_account_validity_callbacks`. 
-ON_LEGACY_SEND_MAIL_CALLBACK = Callable[[str], Awaitable] -ON_LEGACY_RENEW_CALLBACK = Callable[[str], Awaitable[Tuple[bool, bool, int]]] -ON_LEGACY_ADMIN_REQUEST = Callable[[Request], Awaitable] - class AccountValidityHandler: def __init__(self, hs: "HomeServer"): self.hs = hs self.config = hs.config - self.store = self.hs.get_datastores().main - self.send_email_handler = self.hs.get_send_email_handler() - self.clock = self.hs.get_clock() + self.store = hs.get_datastores().main + self.send_email_handler = hs.get_send_email_handler() + self.clock = hs.get_clock() - self._app_name = self.hs.config.email.email_app_name + self._app_name = hs.config.email.email_app_name + self._module_api_callbacks = hs.get_module_api_callbacks().account_validity self._account_validity_enabled = ( hs.config.account_validity.account_validity_enabled @@ -78,69 +68,6 @@ def __init__(self, hs: "HomeServer"): if hs.config.worker.run_background_tasks: self.clock.looping_call(self._send_renewal_emails, 30 * 60 * 1000) - self._is_user_expired_callbacks: List[IS_USER_EXPIRED_CALLBACK] = [] - self._on_user_registration_callbacks: List[ON_USER_REGISTRATION_CALLBACK] = [] - self._on_legacy_send_mail_callback: Optional[ - ON_LEGACY_SEND_MAIL_CALLBACK - ] = None - self._on_legacy_renew_callback: Optional[ON_LEGACY_RENEW_CALLBACK] = None - - # The legacy admin requests callback isn't a protected attribute because we need - # to access it from the admin servlet, which is outside of this handler. - self.on_legacy_admin_request_callback: Optional[ON_LEGACY_ADMIN_REQUEST] = None - - def register_account_validity_callbacks( - self, - is_user_expired: Optional[IS_USER_EXPIRED_CALLBACK] = None, - on_user_registration: Optional[ON_USER_REGISTRATION_CALLBACK] = None, - on_legacy_send_mail: Optional[ON_LEGACY_SEND_MAIL_CALLBACK] = None, - on_legacy_renew: Optional[ON_LEGACY_RENEW_CALLBACK] = None, - on_legacy_admin_request: Optional[ON_LEGACY_ADMIN_REQUEST] = None, - ) -> None: - """Register callbacks from module for each hook.""" - if is_user_expired is not None: - self._is_user_expired_callbacks.append(is_user_expired) - - if on_user_registration is not None: - self._on_user_registration_callbacks.append(on_user_registration) - - # The builtin account validity feature exposes 3 endpoints (send_mail, renew, and - # an admin one). As part of moving the feature into a module, we need to change - # the path from /_matrix/client/unstable/account_validity/... to - # /_synapse/client/account_validity, because: - # - # * the feature isn't part of the Matrix spec thus shouldn't live under /_matrix - # * the way we register servlets means that modules can't register resources - # under /_matrix/client - # - # We need to allow for a transition period between the old and new endpoints - # in order to allow for clients to update (and for emails to be processed). - # - # Once the email-account-validity module is loaded, it will take control of account - # validity by moving the rows from our `account_validity` table into its own table. - # - # Therefore, we need to allow modules (in practice just the one implementing the - # email-based account validity) to temporarily hook into the legacy endpoints so we - # can route the traffic coming into the old endpoints into the module, which is - # why we have the following three temporary hooks. 
- if on_legacy_send_mail is not None: - if self._on_legacy_send_mail_callback is not None: - raise RuntimeError("Tried to register on_legacy_send_mail twice") - - self._on_legacy_send_mail_callback = on_legacy_send_mail - - if on_legacy_renew is not None: - if self._on_legacy_renew_callback is not None: - raise RuntimeError("Tried to register on_legacy_renew twice") - - self._on_legacy_renew_callback = on_legacy_renew - - if on_legacy_admin_request is not None: - if self.on_legacy_admin_request_callback is not None: - raise RuntimeError("Tried to register on_legacy_admin_request twice") - - self.on_legacy_admin_request_callback = on_legacy_admin_request - async def is_user_expired(self, user_id: str) -> bool: """Checks if a user has expired against third-party modules. @@ -150,7 +77,7 @@ async def is_user_expired(self, user_id: str) -> bool: Returns: Whether the user has expired. """ - for callback in self._is_user_expired_callbacks: + for callback in self._module_api_callbacks.is_user_expired_callbacks: expired = await delay_cancellation(callback(user_id)) if expired is not None: return expired @@ -168,7 +95,7 @@ async def on_user_registration(self, user_id: str) -> None: Args: user_id: The ID of the newly registered user. """ - for callback in self._on_user_registration_callbacks: + for callback in self._module_api_callbacks.on_user_registration_callbacks: await callback(user_id) @wrap_as_background_process("send_renewals") @@ -198,8 +125,8 @@ async def send_renewal_email_to_user(self, user_id: str) -> None: """ # If a module supports sending a renewal email from here, do that, otherwise do # the legacy dance. - if self._on_legacy_send_mail_callback is not None: - await self._on_legacy_send_mail_callback(user_id) + if self._module_api_callbacks.on_legacy_send_mail_callback is not None: + await self._module_api_callbacks.on_legacy_send_mail_callback(user_id) return if not self._account_validity_renew_by_email_enabled: @@ -336,8 +263,10 @@ async def renew_account(self, renewal_token: str) -> Tuple[bool, bool, int]: """ # If a module supports triggering a renew from here, do that, otherwise do the # legacy dance. 
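
To illustrate the module side of this refactor (an editor's sketch, not part of the changeset): a module registers its account-validity hooks through the module API, and the handler above now reads them from hs.get_module_api_callbacks().account_validity. The registration method and keyword names below are assumed to match the existing ModuleApi surface.

import logging
from typing import Optional

from synapse.module_api import ModuleApi

logger = logging.getLogger(__name__)

class ExampleAccountValidityModule:
    def __init__(self, config: dict, api: ModuleApi):
        api.register_account_validity_callbacks(
            is_user_expired=self.is_user_expired,
            on_user_registration=self.on_user_registration,
            on_legacy_send_mail=self.on_legacy_send_mail,
        )

    async def is_user_expired(self, user_id: str) -> Optional[bool]:
        # Returning None defers to any other registered callback (or the default).
        return None

    async def on_user_registration(self, user_id: str) -> None:
        logger.info("Tracking account validity for new user %s", user_id)

    async def on_legacy_send_mail(self, user_id: str) -> None:
        # Handles the legacy send_mail endpoint during the transition period
        # described in the removed comment block above.
        logger.info("Sending renewal email for %s", user_id)
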
- if self._on_legacy_renew_callback is not None: - return await self._on_legacy_renew_callback(renewal_token) + if self._module_api_callbacks.on_legacy_renew_callback is not None: + return await self._module_api_callbacks.on_legacy_renew_callback( + renewal_token + ) try: ( diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index b03c214b145a..b06f25b03c21 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -14,7 +14,7 @@ import abc import logging -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set +from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Set from synapse.api.constants import Direction, Membership from synapse.events import EventBase @@ -29,7 +29,7 @@ class AdminHandler: def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastores().main + self._store = hs.get_datastores().main self._device_handler = hs.get_device_handler() self._storage_controllers = hs.get_storage_controllers() self._state_storage_controller = self._storage_controllers.state @@ -38,7 +38,7 @@ def __init__(self, hs: "HomeServer"): async def get_whois(self, user: UserID) -> JsonDict: connections = [] - sessions = await self.store.get_user_ip_and_agents(user) + sessions = await self._store.get_user_ip_and_agents(user) for session in sessions: connections.append( { @@ -57,7 +57,7 @@ async def get_whois(self, user: UserID) -> JsonDict: async def get_user(self, user: UserID) -> Optional[JsonDict]: """Function to get user details""" - user_info_dict = await self.store.get_user_by_id(user.to_string()) + user_info_dict = await self._store.get_user_by_id(user.to_string()) if user_info_dict is None: return None @@ -89,11 +89,11 @@ async def get_user(self, user: UserID) -> Optional[JsonDict]: } # Add additional user metadata - profile = await self.store.get_profileinfo(user.localpart) - threepids = await self.store.user_get_threepids(user.to_string()) + profile = await self._store.get_profileinfo(user.localpart) + threepids = await self._store.user_get_threepids(user.to_string()) external_ids = [ ({"auth_provider": auth_provider, "external_id": external_id}) - for auth_provider, external_id in await self.store.get_external_ids_by_user( + for auth_provider, external_id in await self._store.get_external_ids_by_user( user.to_string() ) ] @@ -101,7 +101,7 @@ async def get_user(self, user: UserID) -> Optional[JsonDict]: user_info_dict["avatar_url"] = profile.avatar_url user_info_dict["threepids"] = threepids user_info_dict["external_ids"] = external_ids - user_info_dict["erased"] = await self.store.is_user_erased(user.to_string()) + user_info_dict["erased"] = await self._store.is_user_erased(user.to_string()) return user_info_dict @@ -117,7 +117,7 @@ async def export_user_data(self, user_id: str, writer: "ExfiltrationWriter") -> The returned value is that returned by `writer.finished()`. """ # Get all rooms the user is in or has been in - rooms = await self.store.get_rooms_for_local_user_where_membership_is( + rooms = await self._store.get_rooms_for_local_user_where_membership_is( user_id, membership_list=( Membership.JOIN, @@ -131,7 +131,7 @@ async def export_user_data(self, user_id: str, writer: "ExfiltrationWriter") -> # We only try and fetch events for rooms the user has been in. If # they've been e.g. invited to a room without joining then we handle # those separately. 
- rooms_user_has_been_in = await self.store.get_rooms_user_has_been_in(user_id) + rooms_user_has_been_in = await self._store.get_rooms_user_has_been_in(user_id) for index, room in enumerate(rooms): room_id = room.room_id @@ -140,7 +140,7 @@ async def export_user_data(self, user_id: str, writer: "ExfiltrationWriter") -> "[%s] Handling room %s, %d/%d", user_id, room_id, index + 1, len(rooms) ) - forgotten = await self.store.did_forget(user_id, room_id) + forgotten = await self._store.did_forget(user_id, room_id) if forgotten: logger.info("[%s] User forgot room %d, ignoring", user_id, room_id) continue @@ -152,14 +152,14 @@ async def export_user_data(self, user_id: str, writer: "ExfiltrationWriter") -> if room.membership == Membership.INVITE: event_id = room.event_id - invite = await self.store.get_event(event_id, allow_none=True) + invite = await self._store.get_event(event_id, allow_none=True) if invite: invited_state = invite.unsigned["invite_room_state"] writer.write_invite(room_id, invite, invited_state) if room.membership == Membership.KNOCK: event_id = room.event_id - knock = await self.store.get_event(event_id, allow_none=True) + knock = await self._store.get_event(event_id, allow_none=True) if knock: knock_state = knock.unsigned["knock_room_state"] writer.write_knock(room_id, knock, knock_state) @@ -170,7 +170,7 @@ async def export_user_data(self, user_id: str, writer: "ExfiltrationWriter") -> # were joined. We estimate that point by looking at the # stream_ordering of the last membership if it wasn't a join. if room.membership == Membership.JOIN: - stream_ordering = self.store.get_room_max_stream_ordering() + stream_ordering = self._store.get_room_max_stream_ordering() else: stream_ordering = room.stream_ordering @@ -197,7 +197,7 @@ async def export_user_data(self, user_id: str, writer: "ExfiltrationWriter") -> # events that we have and then filtering, this isn't the most # efficient method perhaps but it does guarantee we get everything. 
while True: - events, _ = await self.store.paginate_room_events( + events, _ = await self._store.paginate_room_events( room_id, from_key, to_key, limit=100, direction=Direction.FORWARDS ) if not events: @@ -252,16 +252,49 @@ async def export_user_data(self, user_id: str, writer: "ExfiltrationWriter") -> profile = await self.get_user(UserID.from_string(user_id)) if profile is not None: writer.write_profile(profile) + logger.info("[%s] Written profile", user_id) # Get all devices the user has devices = await self._device_handler.get_devices_by_user(user_id) writer.write_devices(devices) + logger.info("[%s] Written %s devices", user_id, len(devices)) # Get all connections the user has connections = await self.get_whois(UserID.from_string(user_id)) writer.write_connections( connections["devices"][""]["sessions"][0]["connections"] ) + logger.info("[%s] Written %s connections", user_id, len(connections)) + + # Get all account data the user has global and in rooms + global_data = await self._store.get_global_account_data_for_user(user_id) + by_room_data = await self._store.get_room_account_data_for_user(user_id) + writer.write_account_data("global", global_data) + for room_id in by_room_data: + writer.write_account_data(room_id, by_room_data[room_id]) + logger.info( + "[%s] Written account data for %s rooms", user_id, len(by_room_data) + ) + + # Get all media ids the user has + limit = 100 + start = 0 + while True: + media_ids, total = await self._store.get_local_media_by_user_paginate( + start, limit, user_id + ) + for media in media_ids: + writer.write_media_id(media["media_id"], media) + + logger.info( + "[%s] Written %d media_ids of %s", + user_id, + (start + len(media_ids)), + total, + ) + if (start + limit) >= total: + break + start += limit return writer.finished() @@ -340,6 +373,30 @@ def write_connections(self, connections: List[JsonDict]) -> None: """ raise NotImplementedError() + @abc.abstractmethod + def write_account_data( + self, file_name: str, account_data: Mapping[str, JsonDict] + ) -> None: + """Write the account data of a user. + + Args: + file_name: file name to write data + account_data: mapping of global or room account_data + """ + raise NotImplementedError() + + @abc.abstractmethod + def write_media_id(self, media_id: str, media_metadata: JsonDict) -> None: + """Write the media's metadata of a user. + Exports only the metadata, as this can be fetched from the database via + read only. In order to access the files, a connection to the correct + media repository would be required. + + Args: + media_id: ID of the media. + media_metadata: Metadata of one media file. + """ + @abc.abstractmethod def finished(self) -> Any: """Called when all data has successfully been exported and written. 
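
A hedged sketch of how a concrete writer might satisfy the two new hooks added above (write_account_data and write_media_id). The directory layout and class name are invented, and the remaining ExfiltrationWriter methods are omitted for brevity.

import json
import os
from typing import Mapping

from synapse.types import JsonDict

class JsonDirectoryWriter:  # would subclass ExfiltrationWriter in real use
    def __init__(self, base_directory: str):
        self.base_directory = base_directory

    def _dump(self, relative_path: str, data: object) -> None:
        path = os.path.join(self.base_directory, relative_path)
        os.makedirs(os.path.dirname(path), exist_ok=True)
        with open(path, "w") as f:
            json.dump(data, f)

    def write_account_data(
        self, file_name: str, account_data: Mapping[str, JsonDict]
    ) -> None:
        # file_name is "global" or a room ID, matching how export_user_data
        # calls this method above.
        self._dump(os.path.join("account_data", f"{file_name}.json"), dict(account_data))

    def write_media_id(self, media_id: str, media_metadata: JsonDict) -> None:
        # Metadata only; the media files themselves live in the media repository.
        self._dump(os.path.join("media", f"{media_id}.json"), media_metadata)
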
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 5d1d21cdc8c6..ec3ab968e925 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -737,7 +737,7 @@ async def query_3pe( ) ret = [] - for (success, result) in results: + for success, result in results: if success: ret.extend(result) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 30f2d46c3c4a..308e38edea2d 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -201,7 +201,7 @@ def __init__(self, hs: "HomeServer"): for auth_checker_class in INTERACTIVE_AUTH_CHECKERS: inst = auth_checker_class(hs) if inst.is_enabled(): - self.checkers[inst.AUTH_TYPE] = inst # type: ignore + self.checkers[inst.AUTH_TYPE] = inst self.bcrypt_rounds = hs.config.registration.bcrypt_rounds @@ -815,7 +815,6 @@ async def refresh_token( now_ms = self._clock.time_msec() if existing_token.expiry_ts is not None and existing_token.expiry_ts < now_ms: - raise SynapseError( HTTPStatus.FORBIDDEN, "The supplied refresh token has expired", @@ -1543,6 +1542,17 @@ async def delete_access_tokens_for_user( async def add_threepid( self, user_id: str, medium: str, address: str, validated_at: int ) -> None: + """ + Adds an association between a user's Matrix ID and a third-party ID (email, + phone number). + + Args: + user_id: The ID of the user to associate. + medium: The medium of the third-party ID (email, msisdn). + address: The address of the third-party ID (i.e. an email address). + validated_at: The timestamp in ms of when the validation that the user owns + this third-party ID occurred. + """ # check if medium has a valid value if medium not in ["email", "msisdn"]: raise SynapseError( @@ -1567,43 +1577,44 @@ async def add_threepid( user_id, medium, address, validated_at, self.hs.get_clock().time_msec() ) + # Inform Synapse modules that a 3PID association has been created. + await self._third_party_rules.on_add_user_third_party_identifier( + user_id, medium, address + ) + + # Deprecated method for informing Synapse modules that a 3PID association + # has successfully been created. await self._third_party_rules.on_threepid_bind(user_id, medium, address) - async def delete_threepid( - self, user_id: str, medium: str, address: str, id_server: Optional[str] = None - ) -> bool: - """Attempts to unbind the 3pid on the identity servers and deletes it - from the local database. + async def delete_local_threepid( + self, user_id: str, medium: str, address: str + ) -> None: + """Deletes an association between a third-party ID and a user ID from the local + database. This method does not unbind the association from any identity servers. + + If `medium` is 'email' and a pusher is associated with this third-party ID, the + pusher will also be deleted. Args: user_id: ID of user to remove the 3pid from. medium: The medium of the 3pid being removed: "email" or "msisdn". address: The 3pid address to remove. - id_server: Use the given identity server when unbinding - any threepids. If None then will attempt to unbind using the - identity server specified when binding (if known). - - Returns: - Returns True if successfully unbound the 3pid on - the identity server, False if identity server doesn't support the - unbind API. 
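
Putting the new split together (an editor's sketch; the hs argument is assumed to be the HomeServer): identity-server unbinding now lives solely in the identity handler, while delete_local_threepid only touches the local database, so a caller wanting the old combined behaviour chains the two.

async def remove_threepid(hs, user_id: str, medium: str, address: str) -> bool:
    identity_handler = hs.get_identity_handler()
    auth_handler = hs.get_auth_handler()

    # Ask any identity servers known to hold the binding to drop it.
    unbind_supported = await identity_handler.try_unbind_threepid(
        user_id, medium, address, id_server=None
    )

    # Then remove the association (and any email pusher) from the local database.
    await auth_handler.delete_local_threepid(user_id, medium, address)

    return unbind_supported
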
""" - # 'Canonicalise' email addresses as per above if medium == "email": address = canonicalise_email(address) - identity_handler = self.hs.get_identity_handler() - result = await identity_handler.try_unbind_threepid( - user_id, {"medium": medium, "address": address, "id_server": id_server} + await self.store.user_delete_threepid(user_id, medium, address) + + # Inform Synapse modules that a 3PID association has been deleted. + await self._third_party_rules.on_remove_user_third_party_identifier( + user_id, medium, address ) - await self.store.user_delete_threepid(user_id, medium, address) if medium == "email": await self.store.delete_pusher_by_app_id_pushkey_user_id( app_id="m.email", pushkey=address, user_id=user_id ) - return result async def hash(self, password: str) -> str: """Computes a secure hash of password. @@ -2260,7 +2271,6 @@ async def check_3pid_auth( async def on_logged_out( self, user_id: str, device_id: Optional[str], access_token: str ) -> None: - # call all of the on_logged_out callbacks for callback in self.on_logged_out_callbacks: try: diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index d74d135c0c50..d31263c717b5 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -100,31 +100,28 @@ async def deactivate_account( # unbinding identity_server_supports_unbinding = True - # Retrieve the 3PIDs this user has bound to an identity server - threepids = await self.store.user_get_bound_threepids(user_id) - - for threepid in threepids: + # Attempt to unbind any known bound threepids to this account from identity + # server(s). + bound_threepids = await self.store.user_get_bound_threepids(user_id) + for threepid in bound_threepids: try: result = await self._identity_handler.try_unbind_threepid( - user_id, - { - "medium": threepid["medium"], - "address": threepid["address"], - "id_server": id_server, - }, + user_id, threepid["medium"], threepid["address"], id_server ) - identity_server_supports_unbinding &= result except Exception: # Do we want this to be a fatal error or should we carry on? logger.exception("Failed to remove threepid from ID server") raise SynapseError(400, "Failed to remove threepid from ID server") - await self.store.user_delete_threepid( + + identity_server_supports_unbinding &= result + + # Remove any local threepid associations for this account. + local_threepids = await self.store.user_get_threepids(user_id) + for threepid in local_threepids: + await self._auth_handler.delete_local_threepid( user_id, threepid["medium"], threepid["address"] ) - # Remove all 3PIDs this user has bound to the homeserver - await self.store.user_delete_threepids(user_id) - # delete any devices belonging to the user, which will also # delete corresponding access tokens. await self._device_handler.delete_all_devices_for_user(user_id) diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 2ea52257cb9e..1fb23cc9bf51 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -14,7 +14,7 @@ import logging import string -from typing import TYPE_CHECKING, Iterable, List, Optional +from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence from typing_extensions import Literal @@ -485,7 +485,8 @@ async def edit_published_room_list( ) ) if canonical_alias: - room_aliases.append(canonical_alias) + # Ensure we do not mutate room_aliases. 
+ room_aliases = list(room_aliases) + [canonical_alias] if not self.config.roomdirectory.is_publishing_room_allowed( user_id, room_id, room_aliases @@ -496,9 +497,11 @@ async def edit_published_room_list( raise SynapseError(403, "Not allowed to publish room") # Check if publishing is blocked by a third party module - allowed_by_third_party_rules = await ( - self.third_party_event_rules.check_visibility_can_be_modified( - room_id, visibility + allowed_by_third_party_rules = ( + await ( + self.third_party_event_rules.check_visibility_can_be_modified( + room_id, visibility + ) ) ) if not allowed_by_third_party_rules: @@ -528,7 +531,7 @@ async def edit_published_appservice_room_list( async def get_aliases_for_room( self, requester: Requester, room_id: str - ) -> List[str]: + ) -> Sequence[str]: """ Get a list of the aliases that currently point to this room on this server """ diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index d2188ca08f82..4e9c8d8db0c7 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -159,19 +159,22 @@ async def query_devices( # A map of destination -> user ID -> device IDs. remote_queries_not_in_cache: Dict[str, Dict[str, Iterable[str]]] = {} if remote_queries: - query_list: List[Tuple[str, Optional[str]]] = [] + user_ids = set() + user_and_device_ids: List[Tuple[str, str]] = [] for user_id, device_ids in remote_queries.items(): if device_ids: - query_list.extend( + user_and_device_ids.extend( (user_id, device_id) for device_id in device_ids ) else: - query_list.append((user_id, None)) + user_ids.add(user_id) ( user_ids_not_in_cache, remote_results, - ) = await self.store.get_user_devices_from_cache(query_list) + ) = await self.store.get_user_devices_from_cache( + user_ids, user_and_device_ids + ) # Check that the homeserver still shares a room with all cached users. # Note that this check may be slightly racy when a remote user leaves a @@ -1298,6 +1301,20 @@ async def _retrieve_cross_signing_keys_for_remote_user( return desired_key_data + async def is_cross_signing_set_up_for_user(self, user_id: str) -> bool: + """Checks if the user has cross-signing set up + + Args: + user_id: The user to check + + Returns: + True if the user has cross-signing set up, False otherwise + """ + existing_master_key = await self.store.get_e2e_cross_signing_key( + user_id, "master" + ) + return existing_master_key is not None + def _check_cross_signing_key( key: JsonDict, user_id: str, key_type: str, signing_key: Optional[VerifyKey] = None diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 83f53ceb8891..50317ec753da 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -188,7 +188,6 @@ async def upload_room_keys( # XXX: perhaps we should use a finer grained lock here? 
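
An illustrative use of the new is_cross_signing_set_up_for_user helper introduced in the e2e_keys hunk above (the handler lookup and calling context are assumptions, not part of this changeset):

async def require_cross_signing(hs, user_id: str) -> None:
    e2e_keys_handler = hs.get_e2e_keys_handler()
    if not await e2e_keys_handler.is_cross_signing_set_up_for_user(user_id):
        # e.g. ask the client to upload cross-signing keys before continuing.
        raise RuntimeError(f"{user_id} has not set up cross-signing")
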
async with self._upload_linearizer.queue(user_id): - # Check that the version we're trying to upload is the current version try: version_info = await self.store.get_e2e_room_keys_version_info(user_id) diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py index a23a8ce2a167..0db0bd7304ce 100644 --- a/synapse/handlers/event_auth.py +++ b/synapse/handlers/event_auth.py @@ -63,9 +63,18 @@ async def check_auth_rules_from_context( self._store, event, batched_auth_events ) auth_event_ids = event.auth_event_ids() - auth_events_by_id = await self._store.get_events(auth_event_ids) + if batched_auth_events: - auth_events_by_id.update(batched_auth_events) + # Copy the batched auth events to avoid mutating them. + auth_events_by_id = dict(batched_auth_events) + needed_auth_event_ids = set(auth_event_ids) - set(batched_auth_events) + if needed_auth_event_ids: + auth_events_by_id.update( + await self._store.get_events(needed_auth_event_ids) + ) + else: + auth_events_by_id = await self._store.get_events(auth_event_ids) + check_state_dependent_auth_rules(event, auth_events_by_id.values()) def compute_auth_events( @@ -202,7 +211,7 @@ async def check_restricted_join_rules( state_ids: StateMap[str], room_version: RoomVersion, user_id: str, - prev_member_event: Optional[EventBase], + prev_membership: Optional[str], ) -> None: """ Check whether a user can join a room without an invite due to restricted join rules. @@ -214,15 +223,14 @@ async def check_restricted_join_rules( state_ids: The state of the room as it currently is. room_version: The room version of the room being joined. user_id: The user joining the room. - prev_member_event: The current membership event for this user. + prev_membership: The current membership state for this user. `None` if the + user has never joined the room (equivalent to "leave"). Raises: AuthError if the user cannot join the room. """ # If the member is invited or currently joined, then nothing to do. - if prev_member_event and ( - prev_member_event.membership in (Membership.JOIN, Membership.INVITE) - ): + if prev_membership in (Membership.JOIN, Membership.INVITE): return # This is not a room with a restricted join rule, so we don't need to do the @@ -237,7 +245,6 @@ async def check_restricted_join_rules( # in any of them. allowed_rooms = await self.get_rooms_that_allow_join(state_ids) if not await self.is_user_in_rooms(allowed_rooms, user_id): - # If this is a remote request, the user might be in an allowed room # that we do not know about. if get_domain_from_id(user_id) != self._server_name: @@ -255,13 +262,14 @@ async def check_restricted_join_rules( ) async def has_restricted_join_rules( - self, state_ids: StateMap[str], room_version: RoomVersion + self, partial_state_ids: StateMap[str], room_version: RoomVersion ) -> bool: """ Return if the room has the proper join rules set for access via rooms. Args: - state_ids: The state of the room as it currently is. + state_ids: The state of the room as it currently is. May be full or partial + state. room_version: The room version of the room to query. Returns: @@ -272,7 +280,7 @@ async def has_restricted_join_rules( return False # If there's no join rule, then it defaults to invite (so this doesn't apply). 
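
For readers following the check_restricted_join_rules signature change above, a hedged sketch of the caller's side (the store and state-map variables are assumed; the real caller appears later in this diff in federation_event.py): callers now resolve the previous membership string themselves instead of passing the whole member event.

from typing import Optional

from synapse.api.constants import EventTypes

async def membership_before_join(store, prev_state_ids, user_id: str) -> Optional[str]:
    # None is treated as equivalent to "leave": the user has never joined.
    prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None)
    if prev_member_event_id is None:
        return None
    prev_member_event = await store.get_event(prev_member_event_id)
    return prev_member_event.membership
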
- join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""), None) + join_rules_event_id = partial_state_ids.get((EventTypes.JoinRules, ""), None) if not join_rules_event_id: return False diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 949b69cb4189..33359f6ed748 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -23,7 +23,7 @@ from synapse.handlers.presence import format_user_presence_state from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.streams.config import PaginationConfig -from synapse.types import JsonDict, UserID +from synapse.types import JsonDict, Requester, UserID from synapse.visibility import filter_events_for_client if TYPE_CHECKING: @@ -46,13 +46,12 @@ def __init__(self, hs: "HomeServer"): async def get_stream( self, - auth_user_id: str, + requester: Requester, pagin_config: PaginationConfig, timeout: int = 0, as_client_event: bool = True, affect_presence: bool = True, room_id: Optional[str] = None, - is_guest: bool = False, ) -> JsonDict: """Fetches the events stream for a given user.""" @@ -62,13 +61,12 @@ async def get_stream( raise SynapseError(403, "This room has been blocked on this server") # send any outstanding server notices to the user. - await self._server_notices_sender.on_user_syncing(auth_user_id) + await self._server_notices_sender.on_user_syncing(requester.user.to_string()) - auth_user = UserID.from_string(auth_user_id) presence_handler = self.hs.get_presence_handler() context = await presence_handler.user_syncing( - auth_user_id, + requester.user.to_string(), affect_presence=affect_presence, presence_state=PresenceState.ONLINE, ) @@ -82,10 +80,10 @@ async def get_stream( timeout = random.randint(int(timeout * 0.9), int(timeout * 1.1)) stream_result = await self.notifier.get_events_for( - auth_user, + requester.user, pagin_config, timeout, - is_guest=is_guest, + is_guest=requester.is_guest, explicit_room_id=room_id, ) events = stream_result.events @@ -102,7 +100,7 @@ async def get_stream( if event.membership != Membership.JOIN: continue # Send down presence. - if event.state_key == auth_user_id: + if event.state_key == requester.user.to_string(): # Send down presence for everyone in the room. users: Iterable[str] = await self.store.get_users_in_room( event.room_id @@ -124,7 +122,9 @@ async def get_stream( chunks = self._event_serializer.serialize_events( events, time_now, - config=SerializeEventConfig(as_client_event=as_client_event), + config=SerializeEventConfig( + as_client_event=as_client_event, requester=requester + ), ) chunk = { @@ -159,15 +159,16 @@ async def get_event( Returns: An event, or None if there is no event matching this ID. Raises: - SynapseError if there was a problem retrieving this event, or - AuthError if the user does not have the rights to inspect this - event. + AuthError: if the user does not have the rights to inspect this event. 
""" redact_behaviour = ( EventRedactBehaviour.as_is if show_redacted else EventRedactBehaviour.redact ) event = await self.store.get_event( - event_id, check_room_id=room_id, redact_behaviour=redact_behaviour + event_id, + check_room_id=room_id, + redact_behaviour=redact_behaviour, + allow_none=True, ) if not event: diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 7f64130e0aa1..80156ef343aa 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -49,6 +49,7 @@ FederationPullAttemptBackoffError, HttpResponseException, NotFoundError, + PartialStateConflictError, RequestSendFailed, SynapseError, ) @@ -56,7 +57,7 @@ from synapse.crypto.event_signing import compute_event_signature from synapse.event_auth import validate_event_for_room_version from synapse.events import EventBase -from synapse.events.snapshot import EventContext +from synapse.events.snapshot import EventContext, UnpersistedEventContextBase from synapse.events.validator import EventValidator from synapse.federation.federation_client import InvalidResponseError from synapse.http.servlet import assert_params_in_dict @@ -68,7 +69,6 @@ ReplicationCleanRoomRestServlet, ReplicationStoreRoomOnOutlierMembershipRestServlet, ) -from synapse.storage.databases.main.events import PartialStateConflictError from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.types import JsonDict, StrCollection, get_domain_from_id from synapse.types.state import StateFilter @@ -392,7 +392,7 @@ async def _maybe_backfill_inner( get_prev_content=False, ) - # We set `check_history_visibility_only` as we might otherwise get false + # We unset `filter_out_erased_senders` as we might otherwise get false # positives from users having been erased. filtered_extremities = await filter_events_for_server( self._storage_controllers, @@ -400,7 +400,8 @@ async def _maybe_backfill_inner( self.server_name, events_to_check, redact=False, - check_history_visibility_only=True, + filter_out_erased_senders=False, + filter_out_remote_partial_state_events=False, ) if filtered_extremities: extremities_to_request.append(bp.event_id) @@ -952,7 +953,20 @@ async def on_make_join_request( # # Note that this requires the /send_join request to come back to the # same server. + prev_event_ids = None if room_version.msc3083_join_rules: + # Note that the room's state can change out from under us and render our + # nice join rules-conformant event non-conformant by the time we build the + # event. When this happens, our validation at the end fails and we respond + # to the requesting server with a 403, which is misleading — it indicates + # that the user is not allowed to join the room and the joining server + # should not bother retrying via this homeserver or any others, when + # in fact we've just messed up with building the event. + # + # To reduce the likelihood of this race, we capture the forward extremities + # of the room (prev_event_ids) just before fetching the current state, and + # hope that the state we fetch corresponds to the prev events we chose. 
+ prev_event_ids = await self.store.get_prev_events_for_room(room_id) state_ids = await self._state_storage_controller.get_current_state_ids( room_id ) @@ -990,15 +1004,21 @@ async def on_make_join_request( ) try: - event, context = await self.event_creation_handler.create_new_client_event( - builder=builder + ( + event, + unpersisted_context, + ) = await self.event_creation_handler.create_new_client_event( + builder=builder, + prev_event_ids=prev_event_ids, ) except SynapseError as e: logger.warning("Failed to create join to %s because %s", room_id, e) raise # Ensure the user can even join the room. - await self._federation_event_handler.check_join_restrictions(context, event) + await self._federation_event_handler.check_join_restrictions( + unpersisted_context, event + ) # The remote hasn't signed it yet, obviously. We'll do the full checks # when we get the event back in `on_send_join_request` @@ -1178,7 +1198,7 @@ async def on_make_leave_request( }, ) - event, context = await self.event_creation_handler.create_new_client_event( + event, _ = await self.event_creation_handler.create_new_client_event( builder=builder ) @@ -1228,12 +1248,13 @@ async def on_make_knock_request( }, ) - event, context = await self.event_creation_handler.create_new_client_event( - builder=builder - ) + ( + event, + unpersisted_context, + ) = await self.event_creation_handler.create_new_client_event(builder=builder) event_allowed, _ = await self.third_party_event_rules.check_event_allowed( - event, context + event, unpersisted_context ) if not event_allowed: logger.warning("Creation of knock %s forbidden by third-party rules", event) @@ -1311,7 +1332,13 @@ async def on_backfill_request( ) events = await filter_events_for_server( - self._storage_controllers, origin, self.server_name, events + self._storage_controllers, + origin, + self.server_name, + events, + redact=True, + filter_out_erased_senders=True, + filter_out_remote_partial_state_events=True, ) return events @@ -1342,7 +1369,13 @@ async def get_persisted_pdu( await self._event_auth_handler.assert_host_in_room(event.room_id, origin) events = await filter_events_for_server( - self._storage_controllers, origin, self.server_name, [event] + self._storage_controllers, + origin, + self.server_name, + [event], + redact=True, + filter_out_erased_senders=True, + filter_out_remote_partial_state_events=True, ) event = events[0] return event @@ -1370,7 +1403,13 @@ async def on_get_missing_events( ) missing_events = await filter_events_for_server( - self._storage_controllers, origin, self.server_name, missing_events + self._storage_controllers, + origin, + self.server_name, + missing_events, + redact=True, + filter_out_erased_senders=True, + filter_out_remote_partial_state_events=True, ) return missing_events @@ -1406,15 +1445,20 @@ async def exchange_third_party_invite( try: ( event, - context, + unpersisted_context, ) = await self.event_creation_handler.create_new_client_event( builder=builder ) - event, context = await self.add_display_name_to_third_party_invite( - room_version_obj, event_dict, event, context + ( + event, + unpersisted_context, + ) = await self.add_display_name_to_third_party_invite( + room_version_obj, event_dict, event, unpersisted_context ) + context = await unpersisted_context.persist(event) + EventValidator().validate_new(event, self.config) # We need to tell the transaction queue to send this out, even @@ -1483,14 +1527,19 @@ async def on_exchange_third_party_invite_request( try: ( event, - context, + unpersisted_context, ) = await 
self.event_creation_handler.create_new_client_event( builder=builder ) - event, context = await self.add_display_name_to_third_party_invite( - room_version_obj, event_dict, event, context + ( + event, + unpersisted_context, + ) = await self.add_display_name_to_third_party_invite( + room_version_obj, event_dict, event, unpersisted_context ) + context = await unpersisted_context.persist(event) + try: validate_event_for_room_version(event) await self._event_auth_handler.check_auth_rules_from_context(event) @@ -1522,8 +1571,8 @@ async def add_display_name_to_third_party_invite( room_version_obj: RoomVersion, event_dict: JsonDict, event: EventBase, - context: EventContext, - ) -> Tuple[EventBase, EventContext]: + context: UnpersistedEventContextBase, + ) -> Tuple[EventBase, UnpersistedEventContextBase]: key = ( EventTypes.ThirdPartyInvite, event.content["third_party_invite"]["signed"]["token"], @@ -1557,11 +1606,14 @@ async def add_display_name_to_third_party_invite( room_version_obj, event_dict ) EventValidator().validate_builder(builder) - event, context = await self.event_creation_handler.create_new_client_event( - builder=builder - ) + + ( + event, + unpersisted_context, + ) = await self.event_creation_handler.create_new_client_event(builder=builder) + EventValidator().validate_new(event, self.config) - return event, context + return event, unpersisted_context async def _check_signature(self, event: EventBase, context: EventContext) -> None: """ @@ -1861,6 +1913,11 @@ async def _sync_partial_state_room( logger.info("Updating current state for %s", room_id) # TODO(faster_joins): notify workers in notify_room_un_partial_stated # https://github.com/matrix-org/synapse/issues/12994 + # + # NB: there's a potential race here. If room is purged just before we + # call this, we _might_ end up inserting rows into current_state_events. + # (The logic is hard to chase through.) We think this is fine, but if + # not the HS admin should purge the room again. 
await self.state_handler.update_current_state(room_id) logger.info("Handling any pending device list updates") diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index e037acbca2bf..b7136f8d1ccb 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -47,6 +47,7 @@ FederationError, FederationPullAttemptBackoffError, HttpResponseException, + PartialStateConflictError, RequestSendFailed, SynapseError, ) @@ -58,7 +59,7 @@ validate_event_for_room_version, ) from synapse.events import EventBase -from synapse.events.snapshot import EventContext +from synapse.events.snapshot import EventContext, UnpersistedEventContextBase from synapse.federation.federation_client import InvalidResponseError, PulledPduInfo from synapse.logging.context import nested_logging_context from synapse.logging.opentracing import ( @@ -74,7 +75,6 @@ ReplicationFederationSendEventsRestServlet, ) from synapse.state import StateResolutionStore -from synapse.storage.databases.main.events import PartialStateConflictError from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.types import ( PersistedEventPosition, @@ -426,7 +426,9 @@ async def on_send_membership_event( return event, context async def check_join_restrictions( - self, context: EventContext, event: EventBase + self, + context: UnpersistedEventContextBase, + event: EventBase, ) -> None: """Check that restrictions in restricted join rules are matched @@ -439,16 +441,17 @@ async def check_join_restrictions( # Check if the user is already in the room or invited to the room. user_id = event.state_key prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None) - prev_member_event = None + prev_membership = None if prev_member_event_id: prev_member_event = await self._store.get_event(prev_member_event_id) + prev_membership = prev_member_event.membership # Check if the member should be allowed access via membership in a space. await self._event_auth_handler.check_restricted_join_rules( prev_state_ids, event.room_version, user_id, - prev_member_event, + prev_membership, ) @trace @@ -524,11 +527,57 @@ async def process_remote_join( "Peristing join-via-remote %s (partial_state: %s)", event, partial_state ) with nested_logging_context(suffix=event.event_id): + if partial_state: + # When handling a second partial state join into a partial state room, + # the returned state will exclude the membership from the first join. To + # preserve prior memberships, we try to compute the partial state before + # the event ourselves if we know about any of the prev events. + # + # When we don't know about any of the prev events, it's fine to just use + # the returned state, since the new join will create a new forward + # extremity, and leave the forward extremity containing our prior + # memberships alone. + prev_event_ids = set(event.prev_event_ids()) + seen_event_ids = await self._store.have_events_in_timeline( + prev_event_ids + ) + missing_event_ids = prev_event_ids - seen_event_ids + + state_maps_to_resolve: List[StateMap[str]] = [] + + # Fetch the state after the prev events that we know about. + state_maps_to_resolve.extend( + ( + await self._state_storage_controller.get_state_groups_ids( + room_id, seen_event_ids, await_full_state=False + ) + ).values() + ) + + # When there are prev events we do not have the state for, we state + # resolve with the state returned by the remote homeserver. 
+ if missing_event_ids or len(state_maps_to_resolve) == 0: + state_maps_to_resolve.append( + {(e.type, e.state_key): e.event_id for e in state} + ) + + state_ids_before_event = ( + await self._state_resolution_handler.resolve_events_with_store( + event.room_id, + room_version.identifier, + state_maps_to_resolve, + event_map=None, + state_res_store=StateResolutionStore(self._store), + ) + ) + else: + state_ids_before_event = { + (e.type, e.state_key): e.event_id for e in state + } + context = await self._state_handler.compute_event_context( event, - state_ids_before_event={ - (e.type, e.state_key): e.event_id for e in state - }, + state_ids_before_event=state_ids_before_event, partial_state=partial_state, ) diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 848e46eb9ba6..bf0f7acf8076 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -219,28 +219,31 @@ async def bind_threepid( data = json_decoder.decode(e.msg) # XXX WAT? return data - async def try_unbind_threepid(self, mxid: str, threepid: dict) -> bool: - """Attempt to remove a 3PID from an identity server, or if one is not provided, all - identity servers we're aware the binding is present on + async def try_unbind_threepid( + self, mxid: str, medium: str, address: str, id_server: Optional[str] + ) -> bool: + """Attempt to remove a 3PID from one or more identity servers. Args: mxid: Matrix user ID of binding to be removed - threepid: Dict with medium & address of binding to be - removed, and an optional id_server. + medium: The medium of the third-party ID. + address: The address of the third-party ID. + id_server: An identity server to attempt to unbind from. If None, + attempt to remove the association from all identity servers + known to potentially have it. Raises: - SynapseError: If we failed to contact the identity server + SynapseError: If we failed to contact one or more identity servers. Returns: - True on success, otherwise False if the identity - server doesn't support unbinding (or no identity server found to - contact). + True on success, otherwise False if the identity server doesn't + support unbinding (or no identity server to contact was found). 
""" - if threepid.get("id_server"): - id_servers = [threepid["id_server"]] + if id_server: + id_servers = [id_server] else: id_servers = await self.store.get_id_servers_user_bound( - user_id=mxid, medium=threepid["medium"], address=threepid["address"] + mxid, medium, address ) # We don't know where to unbind, so we don't have a choice but to return @@ -249,20 +252,21 @@ async def try_unbind_threepid(self, mxid: str, threepid: dict) -> bool: changed = True for id_server in id_servers: - changed &= await self.try_unbind_threepid_with_id_server( - mxid, threepid, id_server + changed &= await self._try_unbind_threepid_with_id_server( + mxid, medium, address, id_server ) return changed - async def try_unbind_threepid_with_id_server( - self, mxid: str, threepid: dict, id_server: str + async def _try_unbind_threepid_with_id_server( + self, mxid: str, medium: str, address: str, id_server: str ) -> bool: """Removes a binding from an identity server Args: mxid: Matrix user ID of binding to be removed - threepid: Dict with medium & address of binding to be removed + medium: The medium of the third-party ID + address: The address of the third-party ID id_server: Identity server to unbind from Raises: @@ -286,7 +290,7 @@ async def try_unbind_threepid_with_id_server( content = { "mxid": mxid, - "threepid": {"medium": threepid["medium"], "address": threepid["address"]}, + "threepid": {"medium": medium, "address": address}, } # we abuse the federation http client to sign the request, but we have to send it @@ -319,12 +323,7 @@ async def try_unbind_threepid_with_id_server( except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") - await self.store.remove_user_bound_threepid( - user_id=mxid, - medium=threepid["medium"], - address=threepid["address"], - id_server=id_server, - ) + await self.store.remove_user_bound_threepid(mxid, medium, address, id_server) return changed diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 191529bd8e27..b3be7a86f0df 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -124,7 +124,6 @@ async def _snapshot_all_rooms( as_client_event: bool = True, include_archived: bool = False, ) -> JsonDict: - memberships = [Membership.INVITE, Membership.JOIN] if include_archived: memberships.append(Membership.LEAVE) @@ -154,9 +153,8 @@ async def _snapshot_all_rooms( tags_by_room = await self.store.get_tags_for_user(user_id) - account_data, account_data_by_room = await self.store.get_account_data_for_user( - user_id - ) + account_data = await self.store.get_global_account_data_for_user(user_id) + account_data_by_room = await self.store.get_room_account_data_for_user(user_id) public_room_ids = await self.store.get_public_room_ids() @@ -320,11 +318,9 @@ async def room_initial_sync( ) is_peeking = member_event_id is None - user_id = requester.user.to_string() - if membership == Membership.JOIN: result = await self._room_initial_sync_joined( - user_id, room_id, pagin_config, membership, is_peeking + requester, room_id, pagin_config, membership, is_peeking ) elif membership == Membership.LEAVE: # The member_event_id will always be available if membership is set @@ -332,10 +328,16 @@ async def room_initial_sync( assert member_event_id result = await self._room_initial_sync_parted( - user_id, room_id, pagin_config, membership, member_event_id, is_peeking + requester, + room_id, + pagin_config, + membership, + member_event_id, + is_peeking, ) account_data_events = [] + user_id = 
requester.user.to_string() tags = await self.store.get_tags_for_room(user_id, room_id) if tags: account_data_events.append( @@ -352,7 +354,7 @@ async def room_initial_sync( async def _room_initial_sync_parted( self, - user_id: str, + requester: Requester, room_id: str, pagin_config: PaginationConfig, membership: str, @@ -371,13 +373,17 @@ async def _room_initial_sync_parted( ) messages = await filter_events_for_client( - self._storage_controllers, user_id, messages, is_peeking=is_peeking + self._storage_controllers, + requester.user.to_string(), + messages, + is_peeking=is_peeking, ) start_token = StreamToken.START.copy_and_replace(StreamKeyType.ROOM, token) end_token = StreamToken.START.copy_and_replace(StreamKeyType.ROOM, stream_token) time_now = self.clock.time_msec() + serialize_options = SerializeEventConfig(requester=requester) return { "membership": membership, @@ -385,14 +391,18 @@ async def _room_initial_sync_parted( "messages": { "chunk": ( # Don't bundle aggregations as this is a deprecated API. - self._event_serializer.serialize_events(messages, time_now) + self._event_serializer.serialize_events( + messages, time_now, config=serialize_options + ) ), "start": await start_token.to_string(self.store), "end": await end_token.to_string(self.store), }, "state": ( # Don't bundle aggregations as this is a deprecated API. - self._event_serializer.serialize_events(room_state.values(), time_now) + self._event_serializer.serialize_events( + room_state.values(), time_now, config=serialize_options + ) ), "presence": [], "receipts": [], @@ -400,7 +410,7 @@ async def _room_initial_sync_parted( async def _room_initial_sync_joined( self, - user_id: str, + requester: Requester, room_id: str, pagin_config: PaginationConfig, membership: str, @@ -412,9 +422,12 @@ async def _room_initial_sync_joined( # TODO: These concurrently time_now = self.clock.time_msec() + serialize_options = SerializeEventConfig(requester=requester) # Don't bundle aggregations as this is a deprecated API. state = self._event_serializer.serialize_events( - current_state.values(), time_now + current_state.values(), + time_now, + config=serialize_options, ) now_token = self.hs.get_event_sources().get_current_token() @@ -452,7 +465,10 @@ async def get_receipts() -> List[JsonDict]: if not receipts: return [] - return ReceiptEventSource.filter_out_private_receipts(receipts, user_id) + return ReceiptEventSource.filter_out_private_receipts( + receipts, + requester.user.to_string(), + ) presence, receipts, (messages, token) = await make_deferred_yieldable( gather_results( @@ -471,20 +487,23 @@ async def get_receipts() -> List[JsonDict]: ) messages = await filter_events_for_client( - self._storage_controllers, user_id, messages, is_peeking=is_peeking + self._storage_controllers, + requester.user.to_string(), + messages, + is_peeking=is_peeking, ) start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token) end_token = now_token - time_now = self.clock.time_msec() - ret = { "room_id": room_id, "messages": { "chunk": ( # Don't bundle aggregations as this is a deprecated API. 
- self._event_serializer.serialize_events(messages, time_now) + self._event_serializer.serialize_events( + messages, time_now, config=serialize_options + ) ), "start": await start_token.to_string(self.store), "end": await end_token.to_string(self.store), diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 0b2d23706ae6..ecd05c564212 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -38,6 +38,7 @@ Codes, ConsentNotGivenError, NotFoundError, + PartialStateConflictError, ShadowBanError, SynapseError, UnstableSpecAuthError, @@ -48,8 +49,8 @@ from synapse.event_auth import validate_event_for_room_version from synapse.events import EventBase, relation_from_event from synapse.events.builder import EventBuilder -from synapse.events.snapshot import EventContext -from synapse.events.utils import maybe_upsert_event_field +from synapse.events.snapshot import EventContext, UnpersistedEventContextBase +from synapse.events.utils import SerializeEventConfig, maybe_upsert_event_field from synapse.events.validator import EventValidator from synapse.handlers.directory import DirectoryHandler from synapse.logging import opentracing @@ -57,7 +58,6 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.send_event import ReplicationSendEventRestServlet from synapse.replication.http.send_events import ReplicationSendEventsRestServlet -from synapse.storage.databases.main.events import PartialStateConflictError from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.types import ( MutableStateMap, @@ -245,8 +245,11 @@ async def get_state_events( ) room_state = room_state_events[membership_event_id] - now = self.clock.time_msec() - events = self._event_serializer.serialize_events(room_state.values(), now) + events = self._event_serializer.serialize_events( + room_state.values(), + self.clock.time_msec(), + config=SerializeEventConfig(requester=requester), + ) return events async def _user_can_see_state_at_event( @@ -499,10 +502,8 @@ def __init__(self, hs: "HomeServer"): self.request_ratelimiter = hs.get_request_ratelimiter() - # We arbitrarily limit concurrent event creation for a room to 5. - # This is to stop us from diverging history *too* much. - # Beeper: limit changed from 5 -> 1, see: - # https://mau.dev/maunium/synapse/-/commit/d4ec0b6762852ce2e7e9a741db57ad459922c5a3 + # We limit concurrent event creation for a room to 1. This prevents state resolution + # from occurring when sending bursts of events to a local room self.limiter = Linearizer(max_count=1, name="room_event_creation_limit") self._bulk_push_rule_evaluator = hs.get_bulk_push_rule_evaluator() @@ -576,7 +577,7 @@ async def create_event( state_map: Optional[StateMap[str]] = None, for_batch: bool = False, current_state_group: Optional[int] = None, - ) -> Tuple[EventBase, EventContext]: + ) -> Tuple[EventBase, UnpersistedEventContextBase]: """ Given a dict from a client, create a new event. 
If bool for_batch is true, will create an event using the prev_event_ids, and will create an event context for @@ -710,7 +711,7 @@ async def create_event( builder.internal_metadata.historical = historical - event, context = await self.create_new_client_event( + event, unpersisted_context = await self.create_new_client_event( builder=builder, requester=requester, allow_no_prev_events=allow_no_prev_events, @@ -739,7 +740,7 @@ async def create_event( assert state_map is not None prev_event_id = state_map.get((EventTypes.Member, event.sender)) else: - prev_state_ids = await context.get_prev_state_ids( + prev_state_ids = await unpersisted_context.get_prev_state_ids( StateFilter.from_types([(EventTypes.Member, None)]) ) prev_event_id = prev_state_ids.get((EventTypes.Member, event.sender)) @@ -764,8 +765,7 @@ async def create_event( ) self.validator.validate_new(event, self.config) - - return event, context + return event, unpersisted_context async def _is_exempt_from_privacy_policy( self, builder: EventBuilder, requester: Requester @@ -987,10 +987,11 @@ async def create_and_send_nonmember_event( # a situation where event persistence can't keep up, causing # extremities to pile up, which in turn leads to state resolution # taking longer. - async with self.limiter.queue(event_dict["room_id"]): + room_id = event_dict["room_id"] + async with self.limiter.queue(room_id): if txn_id: event = await self.get_event_from_transaction( - requester, txn_id, event_dict["room_id"] + requester, txn_id, room_id ) if event: # we know it was persisted, so must have a stream ordering @@ -1000,12 +1001,24 @@ async def create_and_send_nonmember_event( event.internal_metadata.stream_ordering, ) + # If we don't have any prev event IDs specified then we need to + # check that the host is in the room (as otherwise populating the + # prev events will fail), at which point we may as well check the + # local user is in the room. + if not prev_event_ids: + user_id = requester.user.to_string() + is_user_in_room = await self.store.check_local_user_in_room( + user_id, room_id + ) + if not is_user_in_room: + raise AuthError(403, f"User {user_id} not in room {room_id}") + # Try several times, it could fail with PartialStateConflictError # in handle_new_client_event, cf comment in except block. max_retries = 5 for i in range(max_retries): try: - event, context = await self.create_event( + event, unpersisted_context = await self.create_event( requester, event_dict, txn_id=txn_id, @@ -1016,6 +1029,7 @@ async def create_and_send_nonmember_event( historical=historical, depth=depth, ) + context = await unpersisted_context.persist(event) assert self.hs.is_mine_id(event.sender), "User must be our own: %s" % ( event.sender, @@ -1085,13 +1099,14 @@ async def create_new_client_event( state_map: Optional[StateMap[str]] = None, for_batch: bool = False, current_state_group: Optional[int] = None, - ) -> Tuple[EventBase, EventContext]: + ) -> Tuple[EventBase, UnpersistedEventContextBase]: """Create a new event for a local client. If bool for_batch is true, will create an event using the prev_event_ids, and will create an event context for the event using the parameters state_map and current_state_group, thus these parameters must be provided in this case if for_batch is True. The subsequently created event and context are suitable for being batched up and bulk persisted to the database - with other similarly created events. + with other similarly created events. 
Note that this returns an UnpersistedEventContext, + which must be converted to an EventContext before it can be sent to the DB. Args: builder: @@ -1133,7 +1148,7 @@ async def create_new_client_event( batch persisting Returns: - Tuple of created event, context + Tuple of created event, UnpersistedEventContext """ # Strip down the state_event_ids to only what we need to auth the event. # For example, we don't need extra m.room.member that don't match event.sender @@ -1189,14 +1204,20 @@ async def create_new_client_event( if for_batch: assert prev_event_ids is not None assert state_map is not None - assert current_state_group is not None auth_ids = self._event_auth_handler.compute_auth_events(builder, state_map) event = await builder.build( prev_event_ids=prev_event_ids, auth_event_ids=auth_ids, depth=depth ) - context = await self.state.compute_event_context_for_batched( - event, state_map, current_state_group + + context: UnpersistedEventContextBase = ( + await self.state.calculate_context_info( + event, + state_ids_before_event=state_map, + partial_state=False, + state_group_before_event=current_state_group, + ) ) + else: event = await builder.build( prev_event_ids=prev_event_ids, @@ -1246,16 +1267,17 @@ async def create_new_client_event( state_map_for_event[(data.event_type, data.state_key)] = state_id - context = await self.state.compute_event_context( + # TODO(faster_joins): check how MSC2716 works and whether we can have + # partial state here + # https://github.com/matrix-org/synapse/issues/13003 + context = await self.state.calculate_context_info( event, state_ids_before_event=state_map_for_event, - # TODO(faster_joins): check how MSC2716 works and whether we can have - # partial state here - # https://github.com/matrix-org/synapse/issues/13003 partial_state=False, ) + else: - context = await self.state.compute_event_context(event) + context = await self.state.calculate_context_info(event) if requester: context.app_service = requester.app_service @@ -1328,7 +1350,11 @@ async def _validate_event_relation(self, event: EventBase) -> None: relation.parent_id, event.type, aggregation_key, event.sender ) if already_exists: - raise SynapseError(400, "Can't send same reaction twice") + raise SynapseError( + 400, + "Can't send same reaction twice", + errcode=Codes.DUPLICATE_ANNOTATION, + ) # Don't attempt to start a thread if the parent event is a relation. elif relation.rel_type == RelationTypes.THREAD: @@ -2051,7 +2077,7 @@ async def _send_dummy_event_for_room(self, room_id: str) -> bool: max_retries = 5 for i in range(max_retries): try: - event, context = await self.create_event( + event, unpersisted_context = await self.create_event( requester, { "type": EventTypes.Dummy, @@ -2060,6 +2086,7 @@ async def _send_dummy_event_for_room(self, room_id: str) -> bool: "sender": user_id, }, ) + context = await unpersisted_context.persist(event) event.internal_metadata.proactively_send = False @@ -2102,9 +2129,9 @@ def _expire_rooms_to_exclude_from_dummy_event_insertion(self) -> None: async def _rebuild_event_after_third_party_rules( self, third_party_result: dict, original_event: EventBase - ) -> Tuple[EventBase, EventContext]: + ) -> Tuple[EventBase, UnpersistedEventContextBase]: # the third_party_event_rules want to replace the event. - # we do some basic checks, and then return the replacement event and context. + # we do some basic checks, and then return the replacement event. # Construct a new EventBuilder and validate it, which helps with the # rest of these checks. 
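
The hunks above move event creation over to returning an unpersisted context, which the docstring notes must be converted to an EventContext (callers do `context = await unpersisted_context.persist(event)`) before the event can be written to the database. The following is a standalone sketch of that two-phase pattern using made-up class names; it illustrates the shape of the API change only and is not Synapse's actual implementation.

import asyncio
from dataclasses import dataclass, field
from typing import Dict, Tuple

# A state map keyed by (event type, state key), mapping to event IDs.
StateMap = Dict[Tuple[str, str], str]


@dataclass
class Event:
    event_id: str
    room_id: str


@dataclass
class EventContext:
    state_group: int


@dataclass
class FakeStateStore:
    """Stand-in for a state store; allocates incrementing state group IDs."""

    _next_group: int = 1
    groups: Dict[int, StateMap] = field(default_factory=dict)

    async def store_state_group(self, room_id: str, state: StateMap) -> int:
        group, self._next_group = self._next_group, self._next_group + 1
        self.groups[group] = dict(state)
        return group


@dataclass
class UnpersistedContext:
    """Everything needed to build an EventContext, without touching the store yet."""

    store: FakeStateStore
    state_before_event: StateMap

    async def persist(self, event: Event) -> EventContext:
        # Only now is a state group allocated; until this point the context is
        # cheap to create and throw away (e.g. if the event is never sent).
        state_group = await self.store.store_state_group(
            event.room_id, self.state_before_event
        )
        return EventContext(state_group=state_group)


async def main() -> None:
    store = FakeStateStore()
    event = Event("$abc", "!room:example.org")
    unpersisted = UnpersistedContext(
        store, state_before_event={("m.room.create", ""): "$create"}
    )
    context = await unpersisted.persist(event)
    print(context.state_group, store.groups[context.state_group])


asyncio.run(main())

Deferring the state-group write in this way is what lets the batched room-creation path further down build several events against an in-memory state map and persist their contexts together in one step.
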
@@ -2158,5 +2185,6 @@ async def _rebuild_event_after_third_party_rules( # we rebuild the event context, to be on the safe side. If nothing else, # delta_ids might need an update. - context = await self.state.compute_event_context(event) + context = await self.state.calculate_context_info(event) + return event, context diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 0b5fb889f92e..ca8885476aa8 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -591,7 +591,9 @@ async def get_messages( time_now = self.clock.time_msec() - serialize_options = SerializeEventConfig(as_client_event=as_client_event) + serialize_options = SerializeEventConfig( + as_client_event=as_client_event, requester=requester + ) chunk = { "chunk": ( @@ -693,7 +695,7 @@ async def _shutdown_and_purge_room( await self._storage_controllers.purge_events.purge_room(room_id) - logger.info("complete") + logger.info("purge complete for room_id %s", room_id) self._delete_by_id[delete_id].status = DeleteStatus.STATUS_COMPLETE except Exception: f = Failure() diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 87af31aa2706..4ad223357384 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -777,7 +777,6 @@ async def _on_shutdown(self) -> None: ) if self.unpersisted_users_changes: - await self.store.update_presence( [ self.user_to_current_state[user_id] @@ -823,7 +822,6 @@ async def _update_states( now = self.clock.time_msec() with Measure(self.clock, "presence_update_states"): - # NOTE: We purposefully don't await between now and when we've # calculated what we want to do with the new states, to avoid races. diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 4bf9a047a3bc..9a81a77cbd3b 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -63,7 +63,7 @@ def __init__(self, hs: "HomeServer"): self._third_party_rules = hs.get_third_party_event_rules() - async def get_profile(self, user_id: str) -> JsonDict: + async def get_profile(self, user_id: str, ignore_backoff: bool = True) -> JsonDict: target_user = UserID.from_string(user_id) if self.hs.is_mine(target_user): @@ -81,7 +81,7 @@ async def get_profile(self, user_id: str) -> JsonDict: destination=target_user.domain, query_type="profile", args={"user_id": user_id}, - ignore_backoff=True, + ignore_backoff=ignore_backoff, ) return result except RequestSendFailed as e: diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index c9be10a574f5..0ff25b30339f 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging -from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple +from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence, Tuple from synapse.api.constants import EduTypes, ReceiptTypes from synapse.appservice import ApplicationService @@ -190,7 +190,7 @@ def __init__(self, hs: "HomeServer"): @staticmethod def filter_out_private_receipts( - rooms: List[JsonDict], user_id: str + rooms: Sequence[JsonDict], user_id: str ) -> List[JsonDict]: """ Filters a list of serialized receipts (as returned by /sync and /initialSync) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index c611efb760e4..6b110dcb6e4d 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -476,7 +476,7 @@ async def _create_and_join_rooms(self, user_id: str) -> None: # create room expects the localpart of the room alias config["room_alias_name"] = room_alias.localpart - info, _ = await room_creation_handler.create_room( + room_id, _, _ = await room_creation_handler.create_room( fake_requester, config=config, ratelimit=False, @@ -490,7 +490,7 @@ async def _create_and_join_rooms(self, user_id: str) -> None: user_id, authenticated_entity=self._server_name ), target=UserID.from_string(user_id), - room_id=info["room_id"], + room_id=room_id, # Since it was just created, there are no remote hosts. remote_room_hosts=[], action="join", @@ -596,14 +596,20 @@ async def _auto_join_rooms(self, user_id: str) -> None: Args: user_id: The user to join """ + # If there are no rooms to auto-join, just bail. + if not self.hs.config.registration.auto_join_rooms: + return + # auto-join the user to any rooms we're supposed to dump them into # try to create the room if we're the first real user on the server. Note # that an auto-generated support or bot user is not a real user and will never be # the user to create the room should_auto_create_rooms = False - is_real_user = await self.store.is_real_user(user_id) - if self.hs.config.registration.autocreate_auto_join_rooms and is_real_user: + if ( + self.hs.config.registration.autocreate_auto_join_rooms + and await self.store.is_real_user(user_id) + ): count = await self.store.count_real_users() should_auto_create_rooms = count == 1 diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py index 0fb15391e07e..1d09fdf13519 100644 --- a/synapse/handlers/relations.py +++ b/synapse/handlers/relations.py @@ -20,6 +20,7 @@ from synapse.api.constants import Direction, EventTypes, RelationTypes from synapse.api.errors import SynapseError from synapse.events import EventBase, relation_from_event +from synapse.events.utils import SerializeEventConfig from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.logging.opentracing import trace from synapse.storage.databases.main.relations import ThreadsNextBatch, _RelatedEvent @@ -60,13 +61,12 @@ class BundledAggregations: Some values require additional processing during serialization. 
""" - annotations: Optional[JsonDict] = None references: Optional[JsonDict] = None replace: Optional[EventBase] = None thread: Optional[_ThreadAggregation] = None def __bool__(self) -> bool: - return bool(self.annotations or self.references or self.replace or self.thread) + return bool(self.references or self.replace or self.thread) class RelationsHandler: @@ -152,16 +152,23 @@ async def get_relations( ) now = self._clock.time_msec() + serialize_options = SerializeEventConfig(requester=requester) return_value: JsonDict = { "chunk": self._event_serializer.serialize_events( - events, now, bundle_aggregations=aggregations + events, + now, + bundle_aggregations=aggregations, + config=serialize_options, ), } if include_original_event: # Do not bundle aggregations when retrieving the original event because # we want the content before relations are applied to it. return_value["original_event"] = self._event_serializer.serialize_event( - event, now, bundle_aggregations=None + event, + now, + bundle_aggregations=None, + config=serialize_options, ) if next_token: @@ -227,67 +234,6 @@ async def redact_events_related_to( e.msg, ) - async def get_annotations_for_events( - self, event_ids: Collection[str], ignored_users: FrozenSet[str] = frozenset() - ) -> Dict[str, List[JsonDict]]: - """Get a list of annotations to the given events, grouped by event type and - aggregation key, sorted by count. - - This is used e.g. to get the what and how many reactions have happened - on an event. - - Args: - event_ids: Fetch events that relate to these event IDs. - ignored_users: The users ignored by the requesting user. - - Returns: - A map of event IDs to a list of groups of annotations that match. - Each entry is a dict with `type`, `key` and `count` fields. - """ - # Get the base results for all users. - full_results = await self._main_store.get_aggregation_groups_for_events( - event_ids - ) - - # Avoid additional logic if there are no ignored users. - if not ignored_users: - return { - event_id: results - for event_id, results in full_results.items() - if results - } - - # Then subtract off the results for any ignored users. - ignored_results = await self._main_store.get_aggregation_groups_for_users( - [event_id for event_id, results in full_results.items() if results], - ignored_users, - ) - - filtered_results = {} - for event_id, results in full_results.items(): - # If no annotations, skip. - if not results: - continue - - # If there are not ignored results for this event, copy verbatim. - if event_id not in ignored_results: - filtered_results[event_id] = results - continue - - # Otherwise, subtract out the ignored results. - event_ignored_results = ignored_results[event_id] - for result in results: - key = (result["type"], result["key"]) - if key in event_ignored_results: - # Ensure to not modify the cache. - result = result.copy() - result["count"] -= event_ignored_results[key] - if result["count"] <= 0: - continue - filtered_results.setdefault(event_id, []).append(result) - - return filtered_results - async def get_references_for_events( self, event_ids: Collection[str], ignored_users: FrozenSet[str] = frozenset() ) -> Dict[str, List[_RelatedEvent]]: @@ -531,17 +477,6 @@ async def get_bundled_aggregations( # (as that is what makes it part of the thread). 
relations_by_id[latest_thread_event.event_id] = RelationTypes.THREAD - async def _fetch_annotations() -> None: - """Fetch any annotations (ie, reactions) to bundle with this event.""" - annotations_by_event_id = await self.get_annotations_for_events( - events_by_id.keys(), ignored_users=ignored_users - ) - for event_id, annotations in annotations_by_event_id.items(): - if annotations: - results.setdefault(event_id, BundledAggregations()).annotations = { - "chunk": annotations - } - async def _fetch_references() -> None: """Fetch any references to bundle with this event.""" references_by_event_id = await self.get_references_for_events( @@ -575,7 +510,6 @@ async def _fetch_edits() -> None: await make_deferred_yieldable( gather_results( ( - run_in_background(_fetch_annotations), run_in_background(_fetch_references), run_in_background(_fetch_edits), ) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 7b7f3b088f5d..e326515c2441 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -45,6 +45,7 @@ Codes, LimitExceededError, NotFoundError, + PartialStateConflictError, StoreError, SynapseError, ) @@ -52,11 +53,11 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion from synapse.event_auth import validate_event_for_room_version from synapse.events import EventBase +from synapse.events.snapshot import UnpersistedEventContext from synapse.events.utils import copy_and_fixup_power_levels_contents from synapse.handlers.relations import BundledAggregations from synapse.module_api import NOT_SPAM from synapse.rest.admin._base import assert_user_is_admin -from synapse.storage.databases.main.events import PartialStateConflictError from synapse.streams import EventSource from synapse.types import ( JsonDict, @@ -224,7 +225,7 @@ async def upgrade_room( # the required power level to send the tombstone event. ( tombstone_event, - tombstone_context, + tombstone_unpersisted_context, ) = await self.event_creation_handler.create_event( requester, { @@ -238,6 +239,9 @@ async def upgrade_room( }, }, ) + tombstone_context = await tombstone_unpersisted_context.persist( + tombstone_event + ) validate_event_for_room_version(tombstone_event) await self._event_auth_handler.check_auth_rules_from_context( tombstone_event @@ -581,7 +585,7 @@ async def clone_existing_room( new_room_id, # we expect to override all the presets with initial_state, so this is # somewhat arbitrary. - preset_config=RoomCreationPreset.PRIVATE_CHAT, + room_config={"preset": RoomCreationPreset.PRIVATE_CHAT}, invite_list=[], initial_state=initial_state, creation_content=creation_content, @@ -706,13 +710,14 @@ async def create_room( config: JsonDict, ratelimit: bool = True, creator_join_profile: Optional[JsonDict] = None, - ) -> Tuple[dict, int]: + ) -> Tuple[str, Optional[RoomAlias], int]: """Creates a new room. Args: - requester: - The user who requested the room creation. - config : A dict of configuration options. + requester: The user who requested the room creation. + config: A dict of configuration options. This will be the body of + a /createRoom request; see + https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3createroom ratelimit: set to False to disable the rate limiter creator_join_profile: @@ -723,14 +728,17 @@ async def create_room( `avatar_url` and/or `displayname`. Returns: - First, a dict containing the keys `room_id` and, if an alias - was, requested, `room_alias`. Secondly, the stream_id of the - last persisted event. 
+ A 3-tuple containing: + - the room ID; + - if requested, the room alias, otherwise None; and + - the `stream_id` of the last persisted event. Raises: - SynapseError if the room ID couldn't be stored, 3pid invitation config - validation failed, or something went horribly wrong. - ResourceLimitError if server is blocked to some resource being - exceeded + SynapseError: + if the room ID couldn't be stored, 3pid invitation config + validation failed, or something went horribly wrong. + ResourceLimitError: + if server is blocked to some resource being + exceeded """ user_id = requester.user.to_string() @@ -880,9 +888,11 @@ async def create_room( ) # Check whether this visibility value is blocked by a third party module - allowed_by_third_party_rules = await ( - self.third_party_event_rules.check_visibility_can_be_modified( - room_id, visibility + allowed_by_third_party_rules = ( + await ( + self.third_party_event_rules.check_visibility_can_be_modified( + room_id, visibility + ) ) ) if not allowed_by_third_party_rules: @@ -910,13 +920,6 @@ async def create_room( check_membership=False, ) - preset_config = config.get( - "preset", - RoomCreationPreset.PRIVATE_CHAT - if visibility == "private" - else RoomCreationPreset.PUBLIC_CHAT, - ) - raw_initial_state = config.get("initial_state", []) initial_state = OrderedDict() @@ -935,7 +938,7 @@ async def create_room( ) = await self._send_events_for_new_room( requester, room_id, - preset_config=preset_config, + room_config=config, invite_list=invite_list, initial_state=initial_state, creation_content=creation_content, @@ -944,48 +947,6 @@ async def create_room( creator_join_profile=creator_join_profile, ) - if "name" in config: - name = config["name"] - ( - name_event, - last_stream_id, - ) = await self.event_creation_handler.create_and_send_nonmember_event( - requester, - { - "type": EventTypes.Name, - "room_id": room_id, - "sender": user_id, - "state_key": "", - "content": {"name": name}, - }, - ratelimit=False, - prev_event_ids=[last_sent_event_id], - depth=depth, - ) - last_sent_event_id = name_event.event_id - depth += 1 - - if "topic" in config: - topic = config["topic"] - ( - topic_event, - last_stream_id, - ) = await self.event_creation_handler.create_and_send_nonmember_event( - requester, - { - "type": EventTypes.Topic, - "room_id": room_id, - "sender": user_id, - "state_key": "", - "content": {"topic": topic}, - }, - ratelimit=False, - prev_event_ids=[last_sent_event_id], - depth=depth, - ) - last_sent_event_id = topic_event.event_id - depth += 1 - # we avoid dropping the lock between invites, as otherwise joins can # start coming in and making the createRoom slow. # @@ -1043,11 +1004,6 @@ async def create_room( last_sent_event_id = member_event_id depth += 1 - result = {"room_id": room_id} - - if room_alias: - result["room_alias"] = room_alias.to_string() - # Always wait for room creation to propagate before returning await self._replication.wait_for_stream_position( self.hs.config.worker.events_shard_config.get_instance(room_id), @@ -1055,13 +1011,13 @@ async def create_room( last_stream_id, ) - return result, last_stream_id + return room_id, room_alias, last_stream_id async def _send_events_for_new_room( self, creator: Requester, room_id: str, - preset_config: str, + room_config: JsonDict, invite_list: List[str], initial_state: MutableStateMap, creation_content: JsonDict, @@ -1078,11 +1034,33 @@ async def _send_events_for_new_room( Rate limiting should already have been applied by this point. 
+ Args: + creator: + the user requesting the room creation + room_id: + room id for the room being created + room_config: + A dict of configuration options. This will be the body of + a /createRoom request; see + https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3createroom + invite_list: + a list of user ids to invite to the room + initial_state: + A list of state events to set in the new room. + creation_content: + Extra keys, such as m.federate, to be added to the content of the m.room.create event. + room_alias: + alias for the room + power_level_content_override: + The power level content to override in the default power level event. + creator_join_profile: + Set to override the displayname and avatar for the creating + user in this room. + Returns: A tuple containing the stream ID, event ID and depth of the last event sent to the room. """ - creator_id = creator.user.to_string() event_keys = {"room_id": room_id, "sender": creator_id, "state_key": ""} depth = 1 @@ -1093,9 +1071,6 @@ async def _send_events_for_new_room( # created (but not persisted to the db) to determine state for future created events # (as this info can't be pulled from the db) state_map: MutableStateMap[str] = {} - # current_state_group of last event created. Used for computing event context of - # events to be batched - current_state_group = None def create_event_dict(etype: str, content: JsonDict, **kwargs: Any) -> JsonDict: e = {"type": etype, "content": content} @@ -1110,7 +1085,7 @@ async def create_event( content: JsonDict, for_batch: bool, **kwargs: Any, - ) -> Tuple[EventBase, synapse.events.snapshot.EventContext]: + ) -> Tuple[EventBase, synapse.events.snapshot.UnpersistedEventContextBase]: """ Creates an event and associated event context. Args: @@ -1129,20 +1104,33 @@ async def create_event( event_dict = create_event_dict(etype, content, **kwargs) - new_event, new_context = await self.event_creation_handler.create_event( + ( + new_event, + new_unpersisted_context, + ) = await self.event_creation_handler.create_event( creator, event_dict, prev_event_ids=prev_event, depth=depth, - state_map=state_map, + # Take a copy to ensure each event gets a unique copy of + # state_map since it is modified below. 
+ state_map=dict(state_map), for_batch=for_batch, - current_state_group=current_state_group, ) + depth += 1 prev_event = [new_event.event_id] state_map[(new_event.type, new_event.state_key)] = new_event.event_id - return new_event, new_context + return new_event, new_unpersisted_context + + visibility = room_config.get("visibility", "private") + preset_config = room_config.get( + "preset", + RoomCreationPreset.PRIVATE_CHAT + if visibility == "private" + else RoomCreationPreset.PUBLIC_CHAT, + ) try: config = self._presets_dict[preset_config] @@ -1152,10 +1140,10 @@ async def create_event( ) creation_content.update({"creator": creator_id}) - creation_event, creation_context = await create_event( + creation_event, unpersisted_creation_context = await create_event( EventTypes.Create, creation_content, False ) - + creation_context = await unpersisted_creation_context.persist(creation_event) logger.debug("Sending %s in new room", EventTypes.Member) ev = await self.event_creation_handler.handle_new_client_event( requester=creator, @@ -1199,7 +1187,6 @@ async def create_event( power_event, power_context = await create_event( EventTypes.PowerLevels, pl_content, True ) - current_state_group = power_context._state_group events_to_send.append((power_event, power_context)) else: power_level_content: JsonDict = { @@ -1248,14 +1235,12 @@ async def create_event( power_level_content, True, ) - current_state_group = pl_context._state_group events_to_send.append((pl_event, pl_context)) if room_alias and (EventTypes.CanonicalAlias, "") not in initial_state: room_alias_event, room_alias_context = await create_event( EventTypes.CanonicalAlias, {"alias": room_alias.to_string()}, True ) - current_state_group = room_alias_context._state_group events_to_send.append((room_alias_event, room_alias_context)) if (EventTypes.JoinRules, "") not in initial_state: @@ -1264,7 +1249,6 @@ async def create_event( {"join_rule": config["join_rules"]}, True, ) - current_state_group = join_rules_context._state_group events_to_send.append((join_rules_event, join_rules_context)) if (EventTypes.RoomHistoryVisibility, "") not in initial_state: @@ -1273,7 +1257,6 @@ async def create_event( {"history_visibility": config["history_visibility"]}, True, ) - current_state_group = visibility_context._state_group events_to_send.append((visibility_event, visibility_context)) if config["guest_can_join"]: @@ -1283,14 +1266,12 @@ async def create_event( {EventContentFields.GUEST_ACCESS: GuestAccess.CAN_JOIN}, True, ) - current_state_group = guest_access_context._state_group events_to_send.append((guest_access_event, guest_access_context)) for (etype, state_key), content in initial_state.items(): event, context = await create_event( etype, content, True, state_key=state_key ) - current_state_group = context._state_group events_to_send.append((event, context)) if config["encrypted"]: @@ -1302,9 +1283,34 @@ async def create_event( ) events_to_send.append((encryption_event, encryption_context)) + if "name" in room_config: + name = room_config["name"] + name_event, name_context = await create_event( + EventTypes.Name, + {"name": name}, + True, + ) + events_to_send.append((name_event, name_context)) + + if "topic" in room_config: + topic = room_config["topic"] + topic_event, topic_context = await create_event( + EventTypes.Topic, + {"topic": topic}, + True, + ) + events_to_send.append((topic_event, topic_context)) + + datastore = self.hs.get_datastores().state + events_and_context = ( + await UnpersistedEventContext.batch_persist_unpersisted_contexts( + 
events_to_send, room_id, current_state_group, datastore + ) + ) + last_event = await self.event_creation_handler.handle_new_client_event( creator, - events_to_send, + events_and_context, ignore_shadow_ban=True, ratelimit=False, ) @@ -1844,7 +1850,7 @@ async def shutdown_room( new_room_user_id, authenticated_entity=requester_user_id ) - info, stream_id = await self._room_creation_handler.create_room( + new_room_id, _, stream_id = await self._room_creation_handler.create_room( room_creator_requester, config={ "preset": RoomCreationPreset.PUBLIC_CHAT, @@ -1853,7 +1859,6 @@ async def shutdown_room( }, ratelimit=False, ) - new_room_id = info["room_id"] logger.info( "Shutting down room %r, joining to new room: %r", room_id, new_room_id @@ -1907,6 +1912,7 @@ async def shutdown_room( # Join users to new room if new_room_user_id: + assert new_room_id is not None await self.room_member_handler.update_membership( requester=target_requester, target=target_requester.user, @@ -1939,6 +1945,7 @@ async def shutdown_room( aliases_for_room = await self.store.get_aliases_for_room(room_id) + assert new_room_id is not None await self.store.update_aliases_for_room( room_id, new_room_id, requester_user_id ) @@ -1956,6 +1963,6 @@ async def shutdown_room( return { "kicked_users": kicked_users, "failed_to_kick_users": failed_to_kick_users, - "local_aliases": aliases_for_room, + "local_aliases": list(aliases_for_room), "new_room_id": new_room_id, } diff --git a/synapse/handlers/room_batch.py b/synapse/handlers/room_batch.py index 681a41d3129b..43ed15ac0c60 100644 --- a/synapse/handlers/room_batch.py +++ b/synapse/handlers/room_batch.py @@ -346,7 +346,7 @@ async def persist_historical_events( # Mark all events as historical event_dict["content"][EventContentFields.MSC2716_HISTORICAL] = True - event, context = await self.event_creation_handler.create_event( + event, unpersisted_context = await self.event_creation_handler.create_event( await self.create_requester_for_user_id_from_app_service( ev["sender"], app_service_requester.app_service, @@ -366,7 +366,7 @@ async def persist_historical_events( historical=not beeper_new_messages, depth=inherited_depth, ) - + context = await unpersisted_context.persist(event) assert context._state_group # Normally this is done when persisting the event but we have to @@ -408,7 +408,7 @@ async def persist_historical_events( # correct stream_ordering as they are backfilled (which decrements). # Events are sorted by (topological_ordering, stream_ordering) # where topological_ordering is just depth. 
- for (event, context) in reversed(events_to_persist): + for event, context in reversed(events_to_persist): # This call can't raise `PartialStateConflictError` since we forbid # use of the historical batch API during partial state await self.event_creation_handler.handle_new_client_event( diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index d236cc09b526..509c5578895b 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -26,7 +26,13 @@ GuestAccess, Membership, ) -from synapse.api.errors import AuthError, Codes, ShadowBanError, SynapseError +from synapse.api.errors import ( + AuthError, + Codes, + PartialStateConflictError, + ShadowBanError, + SynapseError, +) from synapse.api.ratelimiting import Ratelimiter from synapse.event_auth import get_named_level, get_power_level_event from synapse.events import EventBase @@ -34,7 +40,6 @@ from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN from synapse.logging import opentracing from synapse.module_api import NOT_SPAM -from synapse.storage.databases.main.events import PartialStateConflictError from synapse.types import ( JsonDict, Requester, @@ -56,6 +61,13 @@ logger = logging.getLogger(__name__) +class NoKnownServersError(SynapseError): + """No server already resident to the room was provided to the join/knock operation.""" + + def __init__(self, msg: str = "No known servers"): + super().__init__(404, msg) + + class RoomMemberHandler(metaclass=abc.ABCMeta): # TODO(paul): This handler currently contains a messy conflation of # low-level API that works on UserID objects and so on, and REST-level @@ -185,12 +197,17 @@ async def _remote_join( room_id: Room that we are trying to join user: User who is trying to join content: A dict that should be used as the content of the join event. + + Raises: + NoKnownServersError: if remote_room_hosts does not contain a server joined to + the room. """ raise NotImplementedError() @abc.abstractmethod async def remote_knock( self, + requester: Requester, remote_room_hosts: List[str], room_id: str, user: UserID, @@ -398,7 +415,10 @@ async def _local_membership_update( max_retries = 5 for i in range(max_retries): try: - event, context = await self.event_creation_handler.create_event( + ( + event, + unpersisted_context, + ) = await self.event_creation_handler.create_event( requester, { "type": EventTypes.Member, @@ -419,7 +439,7 @@ async def _local_membership_update( outlier=outlier, historical=historical, ) - + context = await unpersisted_context.persist(event) prev_state_ids = await context.get_prev_state_ids( StateFilter.from_types([(EventTypes.Member, None)]) ) @@ -484,7 +504,7 @@ async def copy_room_tags_and_direct_to_room( user_id: The user's ID. 
""" # Retrieve user account data for predecessor room - user_account_data, _ = await self.store.get_account_data_for_user(user_id) + user_account_data = await self.store.get_global_account_data_for_user(user_id) # Copy direct message state if applicable direct_rooms = user_account_data.get(AccountDataTypes.DIRECT, {}) @@ -823,14 +843,19 @@ async def update_membership_locked( latest_event_ids = await self.store.get_prev_events_for_room(room_id) - state_before_join = await self.state_handler.compute_state_after_events( - room_id, latest_event_ids + is_partial_state_room = await self.store.is_partial_state_room(room_id) + partial_state_before_join = await self.state_handler.compute_state_after_events( + room_id, latest_event_ids, await_full_state=False ) + # `is_partial_state_room` also indicates whether `partial_state_before_join` is + # partial. # TODO: Refactor into dictionary of explicitly allowed transitions # between old and new state, with specific error messages for some # transitions and generic otherwise - old_state_id = state_before_join.get((EventTypes.Member, target.to_string())) + old_state_id = partial_state_before_join.get( + (EventTypes.Member, target.to_string()) + ) if old_state_id: old_state = await self.store.get_event(old_state_id, allow_none=True) old_membership = old_state.content.get("membership") if old_state else None @@ -881,11 +906,11 @@ async def update_membership_locked( if action == "kick": raise AuthError(403, "The target user is not in the room") - is_host_in_room = await self._is_host_in_room(state_before_join) + is_host_in_room = await self._is_host_in_room(partial_state_before_join) if effective_membership_state == Membership.JOIN: if requester.is_guest: - guest_can_join = await self._can_guest_join(state_before_join) + guest_can_join = await self._can_guest_join(partial_state_before_join) if not guest_can_join: # This should be an auth check, but guests are a local concept, # so don't really fit into the general auth process. @@ -927,8 +952,9 @@ async def update_membership_locked( room_id, remote_room_hosts, content, + is_partial_state_room, is_host_in_room, - state_before_join, + partial_state_before_join, ) if remote_join: if ratelimit: @@ -1048,7 +1074,7 @@ async def update_membership_locked( ) return await self.remote_knock( - remote_room_hosts, room_id, target, content + requester, remote_room_hosts, room_id, target, content ) return await self._local_membership_update( @@ -1073,8 +1099,9 @@ async def _should_perform_remote_join( room_id: str, remote_room_hosts: List[str], content: JsonDict, + is_partial_state_room: bool, is_host_in_room: bool, - state_before_join: StateMap[str], + partial_state_before_join: StateMap[str], ) -> Tuple[bool, List[str]]: """ Check whether the server should do a remote join (as opposed to a local @@ -1093,9 +1120,12 @@ async def _should_perform_remote_join( remote_room_hosts: A list of remote room hosts. content: The content to use as the event body of the join. This may be modified. - is_host_in_room: True if the host is in the room. - state_before_join: The state before the join event (i.e. the resolution of - the states after its parent events). + is_partial_state_room: `True` if the server currently doesn't hold the full + state of the room. + is_host_in_room: `True` if the host is in the room. + partial_state_before_join: The state before the join event (i.e. the + resolution of the states after its parent events). May be full or + partial state, depending on `is_partial_state_room`. 
Returns: A tuple of: @@ -1109,6 +1139,23 @@ async def _should_perform_remote_join( if not is_host_in_room: return True, remote_room_hosts + prev_member_event_id = partial_state_before_join.get( + (EventTypes.Member, user_id), None + ) + previous_membership = None + if prev_member_event_id: + prev_member_event = await self.store.get_event(prev_member_event_id) + previous_membership = prev_member_event.membership + + # If we are not fully joined yet, and the target is not already in the room, + # let's do a remote join so another server with the full state can validate + # that the user has not been banned for example. + # We could just accept the join and wait for state res to resolve that later on + # but we would then leak room history to this person until then, which is pretty + # bad. + if is_partial_state_room and previous_membership != Membership.JOIN: + return True, remote_room_hosts + # If the host is in the room, but not one of the authorised hosts # for restricted join rules, a remote join must be used. room_version = await self.store.get_room_version(room_id) @@ -1116,21 +1163,19 @@ async def _should_perform_remote_join( # If restricted join rules are not being used, a local join can always # be used. if not await self.event_auth_handler.has_restricted_join_rules( - state_before_join, room_version + partial_state_before_join, room_version ): return False, [] # If the user is invited to the room or already joined, the join # event can always be issued locally. - prev_member_event_id = state_before_join.get((EventTypes.Member, user_id), None) - prev_member_event = None - if prev_member_event_id: - prev_member_event = await self.store.get_event(prev_member_event_id) - if prev_member_event.membership in ( - Membership.JOIN, - Membership.INVITE, - ): - return False, [] + if previous_membership in (Membership.JOIN, Membership.INVITE): + return False, [] + + # All the partial state cases are covered above. We have been given the full + # state of the room. + assert not is_partial_state_room + state_before_join = partial_state_before_join # If the local host has a user who can issue invites, then a local # join can be done. @@ -1154,7 +1199,7 @@ async def _should_perform_remote_join( # Ensure the member should be allowed access via membership in a room. await self.event_auth_handler.check_restricted_join_rules( - state_before_join, room_version, user_id, prev_member_event + state_before_join, room_version, user_id, previous_membership ) # If this is going to be a local join, additional information must @@ -1304,11 +1349,17 @@ async def send_membership_event( if prev_member_event.membership == Membership.JOIN: await self._user_left_room(target_user, room_id) - async def _can_guest_join(self, current_state_ids: StateMap[str]) -> bool: + async def _can_guest_join(self, partial_current_state_ids: StateMap[str]) -> bool: """ Returns whether a guest can join a room based on its current state. + + Args: + partial_current_state_ids: The current state of the room. May be full or + partial state. 
""" - guest_access_id = current_state_ids.get((EventTypes.GuestAccess, ""), None) + guest_access_id = partial_current_state_ids.get( + (EventTypes.GuestAccess, ""), None + ) if not guest_access_id: return False @@ -1634,19 +1685,25 @@ async def _make_and_store_3pid_invite( ) return event, stream_id - async def _is_host_in_room(self, current_state_ids: StateMap[str]) -> bool: + async def _is_host_in_room(self, partial_current_state_ids: StateMap[str]) -> bool: + """Returns whether the homeserver is in the room based on its current state. + + Args: + partial_current_state_ids: The current state of the room. May be full or + partial state. + """ # Have we just created the room, and is this about to be the very # first member event? - create_event_id = current_state_ids.get(("m.room.create", "")) - if len(current_state_ids) == 1 and create_event_id: + create_event_id = partial_current_state_ids.get(("m.room.create", "")) + if len(partial_current_state_ids) == 1 and create_event_id: # We can only get here if we're in the process of creating the room return True - for etype, state_key in current_state_ids: + for etype, state_key in partial_current_state_ids: if etype != EventTypes.Member or not self.hs.is_mine_id(state_key): continue - event_id = current_state_ids[(etype, state_key)] + event_id = partial_current_state_ids[(etype, state_key)] event = await self.store.get_event(event_id, allow_none=True) if not event: continue @@ -1715,8 +1772,7 @@ async def _remote_join( ] if len(remote_room_hosts) == 0: - raise SynapseError( - 404, + raise NoKnownServersError( "Can't join remote room because no servers " "that are in the room have been provided.", ) @@ -1892,7 +1948,10 @@ async def _generate_local_out_of_band_leave( max_retries = 5 for i in range(max_retries): try: - event, context = await self.event_creation_handler.create_event( + ( + event, + unpersisted_context, + ) = await self.event_creation_handler.create_event( requester, event_dict, txn_id=txn_id, @@ -1900,6 +1959,7 @@ async def _generate_local_out_of_band_leave( auth_event_ids=auth_event_ids, outlier=True, ) + context = await unpersisted_context.persist(event) event.internal_metadata.out_of_band_membership = True result_event = ( @@ -1925,6 +1985,7 @@ async def _generate_local_out_of_band_leave( async def remote_knock( self, + requester: Requester, remote_room_hosts: List[str], room_id: str, user: UserID, @@ -1947,7 +2008,7 @@ async def remote_knock( ] if len(remote_room_hosts) == 0: - raise SynapseError(404, "No known servers") + raise NoKnownServersError() return await self.federation_handler.do_knock( remote_room_hosts, room_id, user.to_string(), content=content diff --git a/synapse/handlers/room_member_worker.py b/synapse/handlers/room_member_worker.py index 221552a2a64b..76e36b8a6d53 100644 --- a/synapse/handlers/room_member_worker.py +++ b/synapse/handlers/room_member_worker.py @@ -15,8 +15,7 @@ import logging from typing import TYPE_CHECKING, List, Optional, Tuple -from synapse.api.errors import SynapseError -from synapse.handlers.room_member import RoomMemberHandler +from synapse.handlers.room_member import NoKnownServersError, RoomMemberHandler from synapse.replication.http.membership import ( ReplicationRemoteJoinRestServlet as ReplRemoteJoin, ReplicationRemoteKnockRestServlet as ReplRemoteKnock, @@ -52,7 +51,7 @@ async def _remote_join( ) -> Tuple[str, int]: """Implements RoomMemberHandler._remote_join""" if len(remote_room_hosts) == 0: - raise SynapseError(404, "No known servers") + raise NoKnownServersError() ret = await 
self._remote_join_client( requester=requester, @@ -114,6 +113,7 @@ async def remote_rescind_knock( async def remote_knock( self, + requester: Requester, remote_room_hosts: List[str], room_id: str, user: UserID, @@ -124,9 +124,10 @@ async def remote_knock( Implements RoomMemberHandler.remote_knock """ ret = await self._remote_knock_client( + requester=requester, remote_room_hosts=remote_room_hosts, room_id=room_id, - user=user, + user_id=user.to_string(), content=content, ) return ret["event_id"], ret["stream_id"] diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py index 4472019fbcd2..807245160db6 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py @@ -521,8 +521,8 @@ async def _is_local_room_accessible( It should return true if: - * The requester is joined or can join the room (per MSC3173). - * The origin server has any user that is joined or can join the room. + * The requesting user is joined or can join the room (per MSC3173); or + * The origin server has any user that is joined or can join the room; or * The history visibility is set to world readable. Args: diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index 9bbf83047dd6..aad4706f148a 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -23,7 +23,8 @@ from synapse.api.errors import NotFoundError, SynapseError from synapse.api.filtering import Filter from synapse.events import EventBase -from synapse.types import JsonDict, StrCollection, StreamKeyType, UserID +from synapse.events.utils import SerializeEventConfig +from synapse.types import JsonDict, Requester, StrCollection, StreamKeyType, UserID from synapse.types.state import StateFilter from synapse.visibility import filter_events_for_client @@ -109,12 +110,12 @@ async def get_old_rooms_from_upgraded_room(self, room_id: str) -> Iterable[str]: return historical_room_ids async def search( - self, user: UserID, content: JsonDict, batch: Optional[str] = None + self, requester: Requester, content: JsonDict, batch: Optional[str] = None ) -> JsonDict: """Performs a full text search for a user. Args: - user: The user performing the search. + requester: The user performing the search. content: Search parameters batch: The next_batch parameter. Used for pagination. @@ -199,7 +200,7 @@ async def search( ) return await self._search( - user, + requester, batch_group, batch_group_key, batch_token, @@ -217,7 +218,7 @@ async def search( async def _search( self, - user: UserID, + requester: Requester, batch_group: Optional[str], batch_group_key: Optional[str], batch_token: Optional[str], @@ -235,7 +236,7 @@ async def _search( """Performs a full text search for a user. Args: - user: The user performing the search. + requester: The user performing the search. batch_group: Pagination information. batch_group_key: Pagination information. batch_token: Pagination information. @@ -269,7 +270,7 @@ async def _search( # TODO: Search through left rooms too rooms = await self.store.get_rooms_for_local_user_where_membership_is( - user.to_string(), + requester.user.to_string(), membership_list=[Membership.JOIN], # membership_list=[Membership.JOIN, Membership.LEAVE, Membership.Ban], ) @@ -303,13 +304,13 @@ async def _search( if order_by == "rank": search_result, sender_group = await self._search_by_rank( - user, room_ids, search_term, keys, search_filter + requester.user, room_ids, search_term, keys, search_filter ) # Unused return values for rank search. 
global_next_batch = None elif order_by == "recent": search_result, global_next_batch = await self._search_by_recent( - user, + requester.user, room_ids, search_term, keys, @@ -334,7 +335,7 @@ async def _search( assert after_limit is not None contexts = await self._calculate_event_contexts( - user, + requester.user, search_result.allowed_events, before_limit, after_limit, @@ -363,27 +364,37 @@ async def _search( # The returned events. search_result.allowed_events, ), - user.to_string(), + requester.user.to_string(), ) # We're now about to serialize the events. We should not make any # blocking calls after this. Otherwise, the 'age' will be wrong. time_now = self.clock.time_msec() + serialize_options = SerializeEventConfig(requester=requester) for context in contexts.values(): context["events_before"] = self._event_serializer.serialize_events( - context["events_before"], time_now, bundle_aggregations=aggregations + context["events_before"], + time_now, + bundle_aggregations=aggregations, + config=serialize_options, ) context["events_after"] = self._event_serializer.serialize_events( - context["events_after"], time_now, bundle_aggregations=aggregations + context["events_after"], + time_now, + bundle_aggregations=aggregations, + config=serialize_options, ) results = [ { "rank": search_result.rank_map[e.event_id], "result": self._event_serializer.serialize_event( - e, time_now, bundle_aggregations=aggregations + e, + time_now, + bundle_aggregations=aggregations, + config=serialize_options, ), "context": contexts.get(e.event_id, {}), } @@ -398,7 +409,9 @@ async def _search( if state_results: rooms_cat_res["state"] = { - room_id: self._event_serializer.serialize_events(state_events, time_now) + room_id: self._event_serializer.serialize_events( + state_events, time_now, config=serialize_options + ) for room_id, state_events in state_results.items() } diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index a8fa06538aed..7b14ce2d089e 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -269,6 +269,8 @@ def __init__(self, hs: "HomeServer"): self._state_storage_controller = self._storage_controllers.state self._device_handler = hs.get_device_handler() + self.should_calculate_push_rules = hs.config.push.enable_push + # TODO: flush cache entries on subsequent sync request. # Once we get the next /sync request (ie, one with the same access token # that sets 'since' to 'next_batch'), we know that device won't need a @@ -1241,6 +1243,10 @@ async def _find_missing_partial_state_memberships( continue event_with_membership_auth = events_with_membership_auth[member] + is_create = ( + event_with_membership_auth.is_state() + and event_with_membership_auth.type == EventTypes.Create + ) is_join = ( event_with_membership_auth.is_state() and event_with_membership_auth.type == EventTypes.Member @@ -1248,9 +1254,10 @@ async def _find_missing_partial_state_memberships( and event_with_membership_auth.content.get("membership") == Membership.JOIN ) - if not is_join: + if not is_create and not is_join: # The event must include the desired membership as an auth event, unless - # it's the first join event for a given user. + # it's the `m.room.create` event for a room or the first join event for + # a given user. 
missing_members.add(member) auth_event_ids.update(event_with_membership_auth.auth_event_ids()) @@ -1305,8 +1312,13 @@ async def _find_missing_partial_state_memberships( async def unread_notifs_for_room_id( self, room_id: str, sync_config: SyncConfig ) -> RoomNotifCounts: - with Measure(self.clock, "unread_notifs_for_room_id"): + if not self.should_calculate_push_rules: + # If push rules have been universally disabled then we know we won't + # have any unread counts in the DB, so we may as well skip asking + # the DB. + return RoomNotifCounts.empty() + with Measure(self.clock, "unread_notifs_for_room_id"): return await self.store.get_unread_event_push_actions_by_room_for_user( room_id, sync_config.user.to_string(), @@ -1438,6 +1450,11 @@ async def generate_sync_result( for room_id, is_partial_state in results.items() if is_partial_state ) + membership_change_events = [ + event + for event in membership_change_events + if not results.get(event.room_id, False) + ] # Incremental eager syncs should additionally include rooms that # - we are joined to @@ -1504,9 +1521,9 @@ async def generate_sync_result( logger.debug("Fetching account data") - account_data_by_room = await self._generate_sync_entry_for_account_data( - sync_result_builder - ) + # Global account data is included if it is not filtered out. + if not sync_config.filter_collection.blocks_all_global_account_data(): + await self._generate_sync_entry_for_account_data(sync_result_builder) # Presence data is included if the server has it enabled and not filtered out. include_presence_data = bool( @@ -1532,9 +1549,7 @@ async def generate_sync_result( ( newly_joined_rooms, newly_left_rooms, - ) = await self._generate_sync_entry_for_rooms( - sync_result_builder, account_data_by_room - ) + ) = await self._generate_sync_entry_for_rooms(sync_result_builder) # Work out which users have joined or left rooms we're in. We use this # to build the presence and device_list parts of the sync response in @@ -1581,7 +1596,7 @@ async def generate_sync_result( one_time_keys_count = await self.store.count_e2e_one_time_keys( user_id, device_id ) - unused_fallback_key_types = ( + unused_fallback_key_types = list( await self.store.get_e2e_unused_fallback_key_types(user_id, device_id) ) @@ -1777,35 +1792,29 @@ async def _generate_sync_entry_for_to_device( async def _generate_sync_entry_for_account_data( self, sync_result_builder: "SyncResultBuilder" - ) -> Dict[str, Dict[str, JsonDict]]: - """Generates the account data portion of the sync response. + ) -> None: + """Generates the global account data portion of the sync response. Account data (called "Client Config" in the spec) can be set either globally or for a specific room. Account data consists of a list of events which accumulate state, much like a room. - This function retrieves global and per-room account data. The former is written - to the given `sync_result_builder`. The latter is returned directly, to be - later written to the `sync_result_builder` on a room-by-room basis. + This function retrieves global account data and writes it to the given + `sync_result_builder`. See `_generate_sync_entry_for_rooms` for handling + of per-room account data. Args: sync_result_builder - - Returns: - A dictionary whose keys (room ids) map to the per room account data for that - room. 
""" sync_config = sync_result_builder.sync_config user_id = sync_result_builder.sync_config.user.to_string() since_token = sync_result_builder.since_token if since_token and not sync_result_builder.full_state: - # TODO Do not fetch room account data if it will be unused. - ( - global_account_data, - account_data_by_room, - ) = await self.store.get_updated_account_data_for_user( - user_id, since_token.account_data_key + global_account_data = ( + await self.store.get_updated_global_account_data_for_user( + user_id, since_token.account_data_key + ) ) push_rules_changed = await self.store.have_push_rules_changed_for_user( @@ -1813,31 +1822,31 @@ async def _generate_sync_entry_for_account_data( ) if push_rules_changed: + global_account_data = dict(global_account_data) global_account_data["m.push_rules"] = await self.push_rules_for_user( sync_config.user ) else: - # TODO Do not fetch room account data if it will be unused. - ( - global_account_data, - account_data_by_room, - ) = await self.store.get_account_data_for_user(sync_config.user.to_string()) + all_global_account_data = await self.store.get_global_account_data_for_user( + user_id + ) + global_account_data = dict(all_global_account_data) global_account_data["m.push_rules"] = await self.push_rules_for_user( sync_config.user ) - account_data_for_user = await sync_config.filter_collection.filter_account_data( - [ - {"type": account_data_type, "content": content} - for account_data_type, content in global_account_data.items() - ] + account_data_for_user = ( + await sync_config.filter_collection.filter_global_account_data( + [ + {"type": account_data_type, "content": content} + for account_data_type, content in global_account_data.items() + ] + ) ) sync_result_builder.account_data = account_data_for_user - return account_data_by_room - async def _generate_sync_entry_for_presence( self, sync_result_builder: "SyncResultBuilder", @@ -1897,9 +1906,7 @@ async def _generate_sync_entry_for_presence( sync_result_builder.presence = presence async def _generate_sync_entry_for_rooms( - self, - sync_result_builder: "SyncResultBuilder", - account_data_by_room: Dict[str, Dict[str, JsonDict]], + self, sync_result_builder: "SyncResultBuilder" ) -> Tuple[AbstractSet[str], AbstractSet[str]]: """Generates the rooms portion of the sync response. Populates the `sync_result_builder` with the result. @@ -1910,7 +1917,6 @@ async def _generate_sync_entry_for_rooms( Args: sync_result_builder - account_data_by_room: Dictionary of per room account data Returns: Returns a 2-tuple describing rooms the user has joined or left. @@ -1923,9 +1929,30 @@ async def _generate_sync_entry_for_rooms( since_token = sync_result_builder.since_token user_id = sync_result_builder.sync_config.user.to_string() + blocks_all_rooms = ( + sync_result_builder.sync_config.filter_collection.blocks_all_rooms() + ) + + # 0. Start by fetching room account data (if required). + if ( + blocks_all_rooms + or sync_result_builder.sync_config.filter_collection.blocks_all_room_account_data() + ): + account_data_by_room: Mapping[str, Mapping[str, JsonDict]] = {} + elif since_token and not sync_result_builder.full_state: + account_data_by_room = ( + await self.store.get_updated_room_account_data_for_user( + user_id, since_token.account_data_key + ) + ) + else: + account_data_by_room = await self.store.get_room_account_data_for_user( + user_id + ) + # 1. Start by fetching all ephemeral events in rooms we've joined (if required). 
block_all_room_ephemeral = ( - sync_result_builder.sync_config.filter_collection.blocks_all_rooms() + blocks_all_rooms or sync_result_builder.sync_config.filter_collection.blocks_all_room_ephemeral() ) if block_all_room_ephemeral: @@ -2357,8 +2384,8 @@ async def _generate_room_entry( sync_result_builder: "SyncResultBuilder", room_builder: "RoomSyncResultBuilder", ephemeral: List[JsonDict], - tags: Optional[Dict[str, Dict[str, Any]]], - account_data: Dict[str, JsonDict], + tags: Optional[Mapping[str, Mapping[str, Any]]], + account_data: Mapping[str, JsonDict], always_include: bool = False, ) -> None: """Populates the `joined` and `archived` section of `sync_result_builder` diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py index 332edcca24e5..78a75bfed6ee 100644 --- a/synapse/handlers/ui_auth/checkers.py +++ b/synapse/handlers/ui_auth/checkers.py @@ -13,7 +13,8 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING, Any +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING, Any, ClassVar, Sequence, Type from twisted.web.client import PartialDownloadError @@ -27,19 +28,28 @@ logger = logging.getLogger(__name__) -class UserInteractiveAuthChecker: +class UserInteractiveAuthChecker(ABC): """Abstract base class for an interactive auth checker""" - def __init__(self, hs: "HomeServer"): + # This should really be an "abstract class property", i.e. it should + # be an error to instantiate a subclass that doesn't specify an AUTH_TYPE. + # But calling this a `ClassVar` is simpler than a decorator stack of + # @property @abstractmethod and @classmethod (if that's even the right order). + AUTH_TYPE: ClassVar[str] + + def __init__(self, hs: "HomeServer"): # noqa: B027 pass + @abstractmethod def is_enabled(self) -> bool: """Check if the configuration of the homeserver allows this checker to work Returns: True if this login type is enabled. """ + raise NotImplementedError() + @abstractmethod async def check_auth(self, authdict: dict, clientip: str) -> Any: """Given the authentication dict from the client, attempt to check this step @@ -304,7 +314,7 @@ async def check_auth(self, authdict: dict, clientip: str) -> Any: ) -INTERACTIVE_AUTH_CHECKERS = [ +INTERACTIVE_AUTH_CHECKERS: Sequence[Type[UserInteractiveAuthChecker]] = [ DummyAuthChecker, TermsAuthChecker, RecaptchaAuthChecker, diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index 3610b6bf785e..28a92d41d6fc 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -13,21 +13,52 @@ # limitations under the License. 
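`UserInteractiveAuthChecker` above becomes a proper ABC whose subclasses must declare an `AUTH_TYPE` and implement `is_enabled` / `check_auth`. As a rough illustration of the new contract (the checker below and its auth type are invented, not part of Synapse), a concrete subclass might look like:

```python
from typing import TYPE_CHECKING, Any, ClassVar

from synapse.handlers.ui_auth.checkers import UserInteractiveAuthChecker

if TYPE_CHECKING:
    from synapse.server import HomeServer


class AlwaysPassChecker(UserInteractiveAuthChecker):
    """Hypothetical checker that accepts every submission.

    Real checkers validate `authdict` and raise an error (e.g. a LoginError)
    when the stage fails.
    """

    # Made-up auth type; real ones come from synapse.api.constants.LoginType.
    AUTH_TYPE: ClassVar[str] = "org.example.always_pass"

    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)

    def is_enabled(self) -> bool:
        # Real checkers consult homeserver config here.
        return True

    async def check_auth(self, authdict: dict, clientip: str) -> Any:
        # Returning a value marks this UIA stage as complete.
        return True
```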
import logging +from http import HTTPStatus from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple +from twisted.internet.interfaces import IDelayedCall + import synapse.metrics from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules, Membership +from synapse.api.errors import Codes, SynapseError from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.databases.main.user_directory import SearchResult from synapse.storage.roommember import ProfileInfo +from synapse.types import UserID from synapse.util.metrics import Measure +from synapse.util.retryutils import NotRetryingDestination +from synapse.util.stringutils import non_null_str_or_none if TYPE_CHECKING: from synapse.server import HomeServer logger = logging.getLogger(__name__) +# Don't refresh a stale user directory entry, using a Federation /profile request, +# for 60 seconds. This gives time for other state events to arrive (which will +# then be coalesced such that only one /profile request is made). +USER_DIRECTORY_STALE_REFRESH_TIME_MS = 60 * 1000 + +# Maximum number of remote servers that we will attempt to refresh profiles for +# in one go. +MAX_SERVERS_TO_REFRESH_PROFILES_FOR_IN_ONE_GO = 5 + +# As long as we have servers to refresh (without backoff), keep adding more +# every 15 seconds. +INTERVAL_TO_ADD_MORE_SERVERS_TO_REFRESH_PROFILES = 15 + + +def calculate_time_of_next_retry(now_ts: int, retry_count: int) -> int: + """ + Calculates the time of a next retry given `now_ts` in ms and the number + of failures encountered thus far. + + Currently the sequence goes: + 1 min, 5 min, 25 min, 2 hour, 10 hour, 52 hour, 10 day, 7.75 week + """ + return now_ts + 60_000 * (5 ** min(retry_count, 7)) + class UserDirectoryHandler(StateDeltasHandler): """Handles queries and updates for the user_directory. @@ -64,12 +95,24 @@ def __init__(self, hs: "HomeServer"): self.update_user_directory = hs.config.worker.should_update_user_directory self.search_all_users = hs.config.userdirectory.user_directory_search_all_users self.spam_checker = hs.get_spam_checker() + self._hs = hs + # The current position in the current_state_delta stream self.pos: Optional[int] = None # Guard to ensure we only process deltas one at a time self._is_processing = False + # Guard to ensure we only have one process for refreshing remote profiles + self._is_refreshing_remote_profiles = False + # Handle to cancel the `call_later` of `kick_off_remote_profile_refresh_process` + self._refresh_remote_profiles_call_later: Optional[IDelayedCall] = None + + # Guard to ensure we only have one process for refreshing remote profiles + # for the given servers. + # Set of server names. 
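The retry schedule encoded by `calculate_time_of_next_retry` above is a capped exponential: one minute times 5**retry_count, with the exponent clamped at 7. A quick worked check of the values quoted in its docstring, copying the function as written above:

```python
def calculate_time_of_next_retry(now_ts: int, retry_count: int) -> int:
    # Copied from the hunk above: 60_000 ms (one minute) times 5**retry_count,
    # with the exponent capped at 7.
    return now_ts + 60_000 * (5 ** min(retry_count, 7))


# Delays relative to `now_ts`, in minutes, for successive failure counts.
delays_minutes = [calculate_time_of_next_retry(0, n) // 60_000 for n in range(9)]
assert delays_minutes == [1, 5, 25, 125, 625, 3125, 15625, 78125, 78125]
# 125 min ≈ 2 h, 625 min ≈ 10 h, 3125 min ≈ 52 h, 15625 min ≈ 10.9 days,
# 78125 min ≈ 7.75 weeks — matching the docstring's sequence; the cap means
# every retry after the eighth repeats the 7.75-week interval.
```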
+ self._is_refreshing_remote_profiles_for_servers: Set[str] = set() + if self.update_user_directory: self.notifier.add_replication_callback(self.notify_new_event) @@ -77,6 +120,11 @@ def __init__(self, hs: "HomeServer"): # we start populating the user directory self.clock.call_later(0, self.notify_new_event) + # Kick off the profile refresh process on startup + self._refresh_remote_profiles_call_later = self.clock.call_later( + 10, self.kick_off_remote_profile_refresh_process + ) + async def search_users( self, user_id: str, search_term: str, limit: int ) -> SearchResult: @@ -200,8 +248,8 @@ async def _handle_deltas(self, deltas: List[Dict[str, Any]]) -> None: typ = delta["type"] state_key = delta["state_key"] room_id = delta["room_id"] - event_id = delta["event_id"] - prev_event_id = delta["prev_event_id"] + event_id: Optional[str] = delta["event_id"] + prev_event_id: Optional[str] = delta["prev_event_id"] logger.debug("Handling: %r %r, %s", typ, state_key, event_id) @@ -297,8 +345,8 @@ async def _handle_room_publicity_change( async def _handle_room_membership_event( self, room_id: str, - prev_event_id: str, - event_id: str, + prev_event_id: Optional[str], + event_id: Optional[str], state_key: str, ) -> None: """Process a single room membershp event. @@ -348,7 +396,8 @@ async def _handle_room_membership_event( # Handle any profile changes for remote users. # (For local users the rest of the application calls # `handle_local_profile_change`.) - if is_remote: + # Only process if there is an event_id. + if is_remote and event_id is not None: await self._handle_possible_remote_profile_change( state_key, room_id, prev_event_id, event_id ) @@ -356,29 +405,13 @@ async def _handle_room_membership_event( # This may be the first time we've seen a remote user. If # so, ensure we have a directory entry for them. (For local users, # the rest of the application calls `handle_local_profile_change`.) - if is_remote: - await self._upsert_directory_entry_for_remote_user(state_key, event_id) + # Only process if there is an event_id. + if is_remote and event_id is not None: + await self._handle_possible_remote_profile_change( + state_key, room_id, None, event_id + ) await self._track_user_joined_room(room_id, state_key) - async def _upsert_directory_entry_for_remote_user( - self, user_id: str, event_id: str - ) -> None: - """A remote user has just joined a room. Ensure they have an entry in - the user directory. The caller is responsible for making sure they're - remote. - """ - event = await self.store.get_event(event_id, allow_none=True) - # It isn't expected for this event to not exist, but we - # don't want the entire background process to break. - if event is None: - return - - logger.debug("Adding new user to dir, %r", user_id) - - await self.store.update_profile_in_user_dir( - user_id, event.content.get("displayname"), event.content.get("avatar_url") - ) - async def _track_user_joined_room(self, room_id: str, joining_user_id: str) -> None: """Someone's just joined a room. Update `users_in_public_rooms` or `users_who_share_private_rooms` as appropriate. @@ -460,14 +493,17 @@ async def _handle_possible_remote_profile_change( user_id: str, room_id: str, prev_event_id: Optional[str], - event_id: Optional[str], + event_id: str, ) -> None: """Check member event changes for any profile changes and update the database if there are. This is intended for remote users only. The caller is responsible for checking that the given user is remote. 
""" - if not prev_event_id or not event_id: - return + + if not prev_event_id: + # If we don't have an older event to fall back on, just fetch the same + # event itself. + prev_event_id = event_id prev_event = await self.store.get_event(prev_event_id, allow_none=True) event = await self.store.get_event(event_id, allow_none=True) @@ -478,17 +514,236 @@ async def _handle_possible_remote_profile_change( if event.membership != Membership.JOIN: return + is_public = await self.store.is_room_world_readable_or_publicly_joinable( + room_id + ) + if not is_public: + # Don't collect user profiles from private rooms as they are not guaranteed + # to be the same as the user's global profile. + now_ts = self.clock.time_msec() + await self.store.set_remote_user_profile_in_user_dir_stale( + user_id, + next_try_at_ms=now_ts + USER_DIRECTORY_STALE_REFRESH_TIME_MS, + retry_counter=0, + ) + # Schedule a wake-up to refresh the user directory for this server. + # We intentionally wake up this server directly because we don't want + # other servers ahead of it in the queue to get in the way of updating + # the profile if the server only just sent us an event. + self.clock.call_later( + USER_DIRECTORY_STALE_REFRESH_TIME_MS // 1000 + 1, + self.kick_off_remote_profile_refresh_process_for_remote_server, + UserID.from_string(user_id).domain, + ) + # Schedule a wake-up to handle any backoffs that may occur in the future. + self.clock.call_later( + 2 * USER_DIRECTORY_STALE_REFRESH_TIME_MS // 1000 + 1, + self.kick_off_remote_profile_refresh_process, + ) + return + prev_name = prev_event.content.get("displayname") new_name = event.content.get("displayname") - # If the new name is an unexpected form, do not update the directory. + # If the new name is an unexpected form, replace with None. if not isinstance(new_name, str): - new_name = prev_name + new_name = None prev_avatar = prev_event.content.get("avatar_url") new_avatar = event.content.get("avatar_url") - # If the new avatar is an unexpected form, do not update the directory. + # If the new avatar is an unexpected form, replace with None. if not isinstance(new_avatar, str): - new_avatar = prev_avatar + new_avatar = None - if prev_name != new_name or prev_avatar != new_avatar: + if ( + prev_name != new_name + or prev_avatar != new_avatar + or prev_event_id == event_id + ): + # Only update if something has changed, or we didn't have a previous event + # in the first place. await self.store.update_profile_in_user_dir(user_id, new_name, new_avatar) + + def kick_off_remote_profile_refresh_process(self) -> None: + """Called when there may be remote users with stale profiles to be refreshed""" + if not self.update_user_directory: + return + + if self._is_refreshing_remote_profiles: + return + + if self._refresh_remote_profiles_call_later: + if self._refresh_remote_profiles_call_later.active(): + self._refresh_remote_profiles_call_later.cancel() + self._refresh_remote_profiles_call_later = None + + async def process() -> None: + try: + await self._unsafe_refresh_remote_profiles() + finally: + self._is_refreshing_remote_profiles = False + + self._is_refreshing_remote_profiles = True + run_as_background_process("user_directory.refresh_remote_profiles", process) + + async def _unsafe_refresh_remote_profiles(self) -> None: + limit = MAX_SERVERS_TO_REFRESH_PROFILES_FOR_IN_ONE_GO - len( + self._is_refreshing_remote_profiles_for_servers + ) + if limit <= 0: + # nothing to do: already refreshing the maximum number of servers + # at once. + # Come back later. 
+ self._refresh_remote_profiles_call_later = self.clock.call_later( + INTERVAL_TO_ADD_MORE_SERVERS_TO_REFRESH_PROFILES, + self.kick_off_remote_profile_refresh_process, + ) + return + + servers_to_refresh = ( + await self.store.get_remote_servers_with_profiles_to_refresh( + now_ts=self.clock.time_msec(), limit=limit + ) + ) + + if not servers_to_refresh: + # Do we have any backing-off servers that we should try again + # for eventually? + # By setting `now` is a point in the far future, we can ask for + # which server/user is next to be refreshed, even though it is + # not actually refreshable *now*. + end_of_time = 1 << 62 + backing_off_servers = ( + await self.store.get_remote_servers_with_profiles_to_refresh( + now_ts=end_of_time, limit=1 + ) + ) + if backing_off_servers: + # Find out when the next user is refreshable and schedule a + # refresh then. + backing_off_server_name = backing_off_servers[0] + users = await self.store.get_remote_users_to_refresh_on_server( + backing_off_server_name, now_ts=end_of_time, limit=1 + ) + if not users: + return + _, _, next_try_at_ts = users[0] + self._refresh_remote_profiles_call_later = self.clock.call_later( + ((next_try_at_ts - self.clock.time_msec()) // 1000) + 2, + self.kick_off_remote_profile_refresh_process, + ) + + return + + for server_to_refresh in servers_to_refresh: + self.kick_off_remote_profile_refresh_process_for_remote_server( + server_to_refresh + ) + + self._refresh_remote_profiles_call_later = self.clock.call_later( + INTERVAL_TO_ADD_MORE_SERVERS_TO_REFRESH_PROFILES, + self.kick_off_remote_profile_refresh_process, + ) + + def kick_off_remote_profile_refresh_process_for_remote_server( + self, server_name: str + ) -> None: + """Called when there may be remote users with stale profiles to be refreshed + on the given server.""" + if not self.update_user_directory: + return + + if server_name in self._is_refreshing_remote_profiles_for_servers: + return + + async def process() -> None: + try: + await self._unsafe_refresh_remote_profiles_for_remote_server( + server_name + ) + finally: + self._is_refreshing_remote_profiles_for_servers.remove(server_name) + + self._is_refreshing_remote_profiles_for_servers.add(server_name) + run_as_background_process( + "user_directory.refresh_remote_profiles_for_remote_server", process + ) + + async def _unsafe_refresh_remote_profiles_for_remote_server( + self, server_name: str + ) -> None: + logger.info("Refreshing profiles in user directory for %s", server_name) + + while True: + # Get a handful of users to process. + next_batch = await self.store.get_remote_users_to_refresh_on_server( + server_name, now_ts=self.clock.time_msec(), limit=10 + ) + if not next_batch: + # Finished for now + return + + for user_id, retry_counter, _ in next_batch: + # Request the profile of the user. + try: + profile = await self._hs.get_profile_handler().get_profile( + user_id, ignore_backoff=False + ) + except NotRetryingDestination as e: + logger.info( + "Failed to refresh profile for %r because the destination is undergoing backoff", + user_id, + ) + # As a special-case, we back off until the destination is no longer + # backed off from. + await self.store.set_remote_user_profile_in_user_dir_stale( + user_id, + e.retry_last_ts + e.retry_interval, + retry_counter=retry_counter + 1, + ) + continue + except SynapseError as e: + if e.code == HTTPStatus.NOT_FOUND and e.errcode == Codes.NOT_FOUND: + # The profile doesn't exist. + # TODO Does this mean we should clear it from our user + # directory? 
+ await self.store.clear_remote_user_profile_in_user_dir_stale( + user_id + ) + logger.warning( + "Refresh of remote profile %r: not found (%r)", + user_id, + e.msg, + ) + continue + + logger.warning( + "Failed to refresh profile for %r because %r", user_id, e + ) + await self.store.set_remote_user_profile_in_user_dir_stale( + user_id, + calculate_time_of_next_retry( + self.clock.time_msec(), retry_counter + 1 + ), + retry_counter=retry_counter + 1, + ) + continue + except Exception: + logger.error( + "Failed to refresh profile for %r due to unhandled exception", + user_id, + exc_info=True, + ) + await self.store.set_remote_user_profile_in_user_dir_stale( + user_id, + calculate_time_of_next_retry( + self.clock.time_msec(), retry_counter + 1 + ), + retry_counter=retry_counter + 1, + ) + continue + + await self.store.update_profile_in_user_dir( + user_id, + display_name=non_null_str_or_none(profile.get("displayname")), + avatar_url=non_null_str_or_none(profile.get("avatar_url")), + ) diff --git a/synapse/http/client.py b/synapse/http/client.py index 49160a8410f3..d45ac94a047b 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -44,6 +44,7 @@ IAddress, IDelayedCall, IHostResolution, + IOpenSSLContextFactory, IReactorCore, IReactorPluggableNameResolver, IReactorTime, @@ -267,8 +268,8 @@ class BlacklistingAgentWrapper(Agent): def __init__( self, agent: IAgent, + ip_blacklist: IPSet, ip_whitelist: Optional[IPSet] = None, - ip_blacklist: Optional[IPSet] = None, ): """ Args: @@ -290,7 +291,9 @@ def request( h = urllib.parse.urlparse(uri.decode("ascii")) try: - ip_address = IPAddress(h.hostname) + # h.hostname is Optional[str], None raises an AddrFormatError, so + # this is safe even though IPAddress requires a str. + ip_address = IPAddress(h.hostname) # type: ignore[arg-type] except AddrFormatError: # Not an IP pass @@ -388,8 +391,8 @@ def __init__( # by the DNS resolution. 
self.agent = BlacklistingAgentWrapper( self.agent, - ip_whitelist=self._ip_whitelist, ip_blacklist=self._ip_blacklist, + ip_whitelist=self._ip_whitelist, ) async def request( @@ -959,8 +962,8 @@ def __init__(self) -> None: self._context = SSL.Context(SSL.SSLv23_METHOD) self._context.set_verify(VERIFY_NONE, lambda *_: False) - def getContext(self, hostname=None, port=None): + def getContext(self) -> SSL.Context: return self._context - def creatorForNetloc(self, hostname: bytes, port: int): + def creatorForNetloc(self, hostname: bytes, port: int) -> IOpenSSLContextFactory: return self diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index 0359231e7dd3..8d7d0a387529 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -87,7 +87,7 @@ def __init__( reactor: ISynapseReactor, tls_client_options_factory: Optional[FederationPolicyForHTTPS], user_agent: bytes, - ip_whitelist: IPSet, + ip_whitelist: Optional[IPSet], ip_blacklist: IPSet, _srv_resolver: Optional[SrvResolver] = None, _well_known_resolver: Optional[WellKnownResolver] = None, diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index b92f1d3d1af5..3302d4e48a0f 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -440,7 +440,7 @@ async def _send_request( Args: request: details of request to be sent - retry_on_dns_fail: true if the request should be retied on DNS failures + retry_on_dns_fail: true if the request should be retried on DNS failures timeout: number of milliseconds to wait for the response headers (including connecting to the server), *for each attempt*. @@ -475,7 +475,7 @@ async def _send_request( (except 429). NotRetryingDestination: If we are not yet ready to retry this server. - FederationDeniedError: If this destination is not on our + FederationDeniedError: If this destination is not on our federation whitelist RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. @@ -871,7 +871,7 @@ async def put_json( (except 429). NotRetryingDestination: If we are not yet ready to retry this server. - FederationDeniedError: If this destination is not on our + FederationDeniedError: If this destination is not on our federation whitelist RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. @@ -958,7 +958,7 @@ async def post_json( (except 429). NotRetryingDestination: If we are not yet ready to retry this server. - FederationDeniedError: If this destination is not on our + FederationDeniedError: If this destination is not on our federation whitelist RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. @@ -1036,6 +1036,8 @@ async def get_json( args: A dictionary used to create query strings, defaults to None. + retry_on_dns_fail: true if the request should be retried on DNS failures + timeout: number of milliseconds to wait for the response. self._default_timeout (60s) by default. @@ -1063,7 +1065,7 @@ async def get_json( (except 429). NotRetryingDestination: If we are not yet ready to retry this server. - FederationDeniedError: If this destination is not on our + FederationDeniedError: If this destination is not on our federation whitelist RequestSendFailed: If there were problems connecting to the remote, due to e.g. 
DNS failures, connection timeouts etc. @@ -1141,7 +1143,7 @@ async def delete_json( (except 429). NotRetryingDestination: If we are not yet ready to retry this server. - FederationDeniedError: If this destination is not on our + FederationDeniedError: If this destination is not on our federation whitelist RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. @@ -1197,7 +1199,7 @@ async def get_file( (except 429). NotRetryingDestination: If we are not yet ready to retry this server. - FederationDeniedError: If this destination is not on our + FederationDeniedError: If this destination is not on our federation whitelist RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. @@ -1267,7 +1269,7 @@ async def get_file( def _flatten_response_never_received(e: BaseException) -> str: if hasattr(e, "reasons"): reasons = ", ".join( - _flatten_response_never_received(f.value) for f in e.reasons # type: ignore[attr-defined] + _flatten_response_never_received(f.value) for f in e.reasons ) return "%s:[%s]" % (type(e).__name__, reasons) diff --git a/synapse/http/server.py b/synapse/http/server.py index 2563858f3cdf..7b760505b25a 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -30,7 +30,6 @@ Iterable, Iterator, List, - NoReturn, Optional, Pattern, Tuple, @@ -340,7 +339,8 @@ async def _async_render(self, request: SynapseRequest) -> Optional[Tuple[int, An return callback_return - return _unrecognised_request_handler(request) + # A request with an unknown method (for a known endpoint) was received. + raise UnrecognizedRequestError(code=405) @abc.abstractmethod def _send_response( @@ -396,7 +396,6 @@ def _send_error_response( @attr.s(slots=True, frozen=True, auto_attribs=True) class _PathEntry: - pattern: Pattern callback: ServletCallback servlet_classname: str @@ -425,13 +424,14 @@ def __init__( ): super().__init__(canonical_json, extract_context) self.clock = hs.get_clock() - self.path_regexs: Dict[bytes, List[_PathEntry]] = {} + # Map of path regex -> method -> callback. + self._routes: Dict[Pattern[str], Dict[bytes, _PathEntry]] = {} self.hs = hs def register_paths( self, method: str, - path_patterns: Iterable[Pattern], + path_patterns: Iterable[Pattern[str]], callback: ServletCallback, servlet_classname: str, ) -> None: @@ -455,8 +455,8 @@ def register_paths( for path_pattern in path_patterns: logger.debug("Registering for %s %s", method, path_pattern.pattern) - self.path_regexs.setdefault(method_bytes, []).append( - _PathEntry(path_pattern, callback, servlet_classname) + self._routes.setdefault(path_pattern, {})[method_bytes] = _PathEntry( + callback, servlet_classname ) def _get_handler_for_request( @@ -478,14 +478,17 @@ def _get_handler_for_request( # Loop through all the registered callbacks to check if the method # and path regex match - for path_entry in self.path_regexs.get(request_method, []): - m = path_entry.pattern.match(request_path) + for path_pattern, methods in self._routes.items(): + m = path_pattern.match(request_path) if m: - # We found a match! + # We found a matching path! + path_entry = methods.get(request_method) + if not path_entry: + raise UnrecognizedRequestError(code=405) return path_entry.callback, path_entry.servlet_classname, m.groupdict() - # Huh. No one wanted to handle that? Fiiiiiine. Send 400. - return _unrecognised_request_handler, "unrecognised_request_handler", {} + # Huh. No one wanted to handle that? Fiiiiiine. 
+ raise UnrecognizedRequestError(code=404) async def _async_render(self, request: SynapseRequest) -> Tuple[int, Any]: callback, servlet_classname, group_dict = self._get_handler_for_request(request) @@ -567,19 +570,6 @@ def render_GET(self, request: Request) -> bytes: return super().render_GET(request) -def _unrecognised_request_handler(request: Request) -> NoReturn: - """Request handler for unrecognised requests - - This is a request handler suitable for return from - _get_handler_for_request. It actually just raises an - UnrecognizedRequestError. - - Args: - request: Unused, but passed in to match the signature of ServletCallback. - """ - raise UnrecognizedRequestError(code=404) - - class UnrecognizedRequestResource(resource.Resource): """ Similar to twisted.web.resource.NoResource, but returns a JSON 404 with an @@ -902,6 +892,10 @@ def set_cors_headers(request: SynapseRequest) -> None: b"Access-Control-Allow-Headers", b"X-Requested-With, Content-Type, Authorization, Date", ) + request.setHeader( + b"Access-Control-Expose-Headers", + b"Synapse-Trace-Id", + ) def set_corp_headers(request: Request) -> None: diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 8ef9a0dda8e5..c70eee649c57 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -188,7 +188,7 @@ def set_fates(clotho, lachesis, atropos, father="Zues", mother="Themis"): ) import attr -from typing_extensions import ParamSpec +from typing_extensions import Concatenate, ParamSpec from twisted.internet import defer from twisted.web.http import Request @@ -445,7 +445,7 @@ def init_tracer(hs: "HomeServer") -> None: opentracing = None # type: ignore[assignment] return - if not opentracing or not JaegerConfig: + if opentracing is None or JaegerConfig is None: raise ConfigError( "The server has been configured to use opentracing but opentracing is not " "installed." @@ -466,8 +466,16 @@ def init_tracer(hs: "HomeServer") -> None: STRIP_INSTANCE_NUMBER_SUFFIX_REGEX, "", hs.get_instance_name() ) + jaeger_config = hs.config.tracing.jaeger_config + tags = jaeger_config.setdefault("tags", {}) + + # tag the Synapse instance name so that it's an easy jumping + # off point into the logs. Can also be used to filter for an + # instance that is under load. 
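The `JsonResource` routing rewrite above replaces the per-method list of `(pattern, entry)` pairs with a nested `path regex -> method -> _PathEntry` map, so a registered path hit with an unsupported method now yields 405 instead of 404. A stripped-down sketch of that lookup (the example route and handler names are invented):

```python
import re
from typing import Dict, Pattern

from synapse.api.errors import UnrecognizedRequestError

# Invented route table in the shape used above: path regex -> method -> handler name.
routes: Dict[Pattern[str], Dict[bytes, str]] = {
    re.compile("^/_matrix/client/v3/login$"): {
        b"GET": "get_login_flows",
        b"POST": "do_login",
    },
}


def lookup(method: bytes, path: str) -> str:
    for path_pattern, methods in routes.items():
        if path_pattern.match(path):
            handler = methods.get(method)
            if handler is None:
                # The path is registered, but not for this method.
                raise UnrecognizedRequestError(code=405)
            return handler
    # No registered path matched at all.
    raise UnrecognizedRequestError(code=404)


assert lookup(b"POST", "/_matrix/client/v3/login") == "do_login"
# lookup(b"DELETE", "/_matrix/client/v3/login") -> 405; lookup(b"GET", "/nope") -> 404
```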
+ tags[SynapseTags.INSTANCE_NAME] = hs.get_instance_name() + config = JaegerConfig( - config=hs.config.tracing.jaeger_config, + config=jaeger_config, service_name=f"{hs.config.server.server_name} {instance_name_by_type}", scope_manager=LogContextScopeManager(), metrics_factory=PrometheusMetricsFactory(), @@ -516,6 +524,7 @@ def whitelisted_homeserver(destination: str) -> bool: # Start spans and scopes + # Could use kwargs but I want these to be explicit def start_active_span( operation_name: str, @@ -864,7 +873,7 @@ def extract_text_map(carrier: Dict[str, str]) -> Optional["opentracing.SpanConte def _custom_sync_async_decorator( func: Callable[P, R], - wrapping_logic: Callable[[Callable[P, R], Any, Any], ContextManager[None]], + wrapping_logic: Callable[Concatenate[Callable[P, R], P], ContextManager[None]], ) -> Callable[P, R]: """ Decorates a function that is sync or async (coroutines), or that returns a Twisted @@ -894,10 +903,14 @@ def _wrapping_logic(func: Callable[P, R], *args: P.args, **kwargs: P.kwargs) -> """ if inspect.iscoroutinefunction(func): - + # In this branch, R = Awaitable[RInner], for some other type RInner @wraps(func) - async def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R: + async def _wrapper( + *args: P.args, **kwargs: P.kwargs + ) -> Any: # Return type is RInner with wrapping_logic(func, *args, **kwargs): + # type-ignore: func() returns R, but mypy doesn't know that R is + # Awaitable here. return await func(*args, **kwargs) # type: ignore[misc] else: @@ -964,7 +977,11 @@ def _decorator(func: Callable[P, R]) -> Callable[P, R]: if not opentracing: return func - return _custom_sync_async_decorator(func, _wrapping_logic) + # type-ignore: mypy seems to be confused by the ParamSpecs here. + # I think the problem is https://github.com/python/mypy/issues/12909 + return _custom_sync_async_decorator( + func, _wrapping_logic # type: ignore[arg-type] + ) return _decorator @@ -1010,7 +1027,9 @@ def _wrapping_logic( set_tag(SynapseTags.FUNC_KWARGS, str(kwargs)) yield - return _custom_sync_async_decorator(func, _wrapping_logic) + # type-ignore: mypy seems to be confused by the ParamSpecs here. + # I think the problem is https://github.com/python/mypy/issues/12909 + return _custom_sync_async_decorator(func, _wrapping_logic) # type: ignore[arg-type] @contextlib.contextmanager diff --git a/synapse/media/_base.py b/synapse/media/_base.py new file mode 100644 index 000000000000..ef8334ae2586 --- /dev/null +++ b/synapse/media/_base.py @@ -0,0 +1,479 @@ +# Copyright 2014-2016 OpenMarket Ltd +# Copyright 2019-2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
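The `Concatenate[Callable[P, R], P]` annotation above pins down the contract of `_custom_sync_async_decorator`: the wrapping logic receives the wrapped function followed by exactly that function's own arguments. A self-contained sketch of the same shape (ignoring the Twisted-Deferred case the real helper also handles):

```python
import inspect
from contextlib import contextmanager
from functools import wraps
from typing import Any, Callable, ContextManager, Iterator, TypeVar

from typing_extensions import Concatenate, ParamSpec

P = ParamSpec("P")
R = TypeVar("R")


def sync_async_decorator(
    func: Callable[P, R],
    wrapping_logic: Callable[Concatenate[Callable[P, R], P], ContextManager[None]],
) -> Callable[P, R]:
    """Run `wrapping_logic` around each call of `func`, sync or async."""
    if inspect.iscoroutinefunction(func):

        @wraps(func)
        async def _async_wrapper(*args: P.args, **kwargs: P.kwargs) -> Any:
            with wrapping_logic(func, *args, **kwargs):
                return await func(*args, **kwargs)  # type: ignore[misc]

        return _async_wrapper  # type: ignore[return-value]

    @wraps(func)
    def _sync_wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
        with wrapping_logic(func, *args, **kwargs):
            return func(*args, **kwargs)

    return _sync_wrapper


@contextmanager
def _log_call(func: Callable[..., Any], *args: Any, **kwargs: Any) -> Iterator[None]:
    # Toy wrapping logic: in Synapse this is where a tracing span is opened.
    print(f"entering {func.__name__} with {args} {kwargs}")
    yield
    print(f"leaving {func.__name__}")


def add(a: int, b: int) -> int:
    return a + b


traced_add = sync_async_decorator(add, _log_call)
assert traced_add(1, 2) == 3  # prints "entering add ..." / "leaving add"
```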
+ +import logging +import os +import urllib +from abc import ABC, abstractmethod +from types import TracebackType +from typing import Awaitable, Dict, Generator, List, Optional, Tuple, Type + +import attr + +from twisted.internet.interfaces import IConsumer +from twisted.protocols.basic import FileSender +from twisted.web.server import Request + +from synapse.api.errors import Codes, SynapseError, cs_error +from synapse.http.server import finish_request, respond_with_json +from synapse.http.site import SynapseRequest +from synapse.logging.context import make_deferred_yieldable +from synapse.util.stringutils import is_ascii, parse_and_validate_server_name + +logger = logging.getLogger(__name__) + +# list all text content types that will have the charset default to UTF-8 when +# none is given +TEXT_CONTENT_TYPES = [ + "text/css", + "text/csv", + "text/html", + "text/calendar", + "text/plain", + "text/javascript", + "application/json", + "application/ld+json", + "application/rtf", + "image/svg+xml", + "text/xml", +] + + +def parse_media_id(request: Request) -> Tuple[str, str, Optional[str]]: + """Parses the server name, media ID and optional file name from the request URI + + Also performs some rough validation on the server name. + + Args: + request: The `Request`. + + Returns: + A tuple containing the parsed server name, media ID and optional file name. + + Raises: + SynapseError(404): if parsing or validation fail for any reason + """ + try: + # The type on postpath seems incorrect in Twisted 21.2.0. + postpath: List[bytes] = request.postpath # type: ignore + assert postpath + + # This allows users to append e.g. /test.png to the URL. Useful for + # clients that parse the URL to see content type. + server_name_bytes, media_id_bytes = postpath[:2] + server_name = server_name_bytes.decode("utf-8") + media_id = media_id_bytes.decode("utf8") + + # Validate the server name, raising if invalid + parse_and_validate_server_name(server_name) + + file_name = None + if len(postpath) > 2: + try: + file_name = urllib.parse.unquote(postpath[-1].decode("utf-8")) + except UnicodeDecodeError: + pass + return server_name, media_id, file_name + except Exception: + raise SynapseError( + 404, "Invalid media id token %r" % (request.postpath,), Codes.UNKNOWN + ) + + +def respond_404(request: SynapseRequest) -> None: + respond_with_json( + request, + 404, + cs_error("Not found %r" % (request.postpath,), code=Codes.NOT_FOUND), + send_cors=True, + ) + + +async def respond_with_file( + request: SynapseRequest, + media_type: str, + file_path: str, + file_size: Optional[int] = None, + upload_name: Optional[str] = None, +) -> None: + logger.debug("Responding with %r", file_path) + + if os.path.isfile(file_path): + if file_size is None: + stat = os.stat(file_path) + file_size = stat.st_size + + add_file_headers(request, media_type, file_size, upload_name) + + with open(file_path, "rb") as f: + await make_deferred_yieldable(FileSender().beginFileTransfer(f, request)) + + finish_request(request) + else: + respond_404(request) + + +def add_file_headers( + request: Request, + media_type: str, + file_size: Optional[int], + upload_name: Optional[str], +) -> None: + """Adds the correct response headers in preparation for responding with the + media. + + Args: + request + media_type: The media/content type. + file_size: Size in bytes of the media, if known. + upload_name: The name of the requested file, if any. 
+ """ + + def _quote(x: str) -> str: + return urllib.parse.quote(x.encode("utf-8")) + + # Default to a UTF-8 charset for text content types. + # ex, uses UTF-8 for 'text/css' but not 'text/css; charset=UTF-16' + if media_type.lower() in TEXT_CONTENT_TYPES: + content_type = media_type + "; charset=UTF-8" + else: + content_type = media_type + + request.setHeader(b"Content-Type", content_type.encode("UTF-8")) + if upload_name: + # RFC6266 section 4.1 [1] defines both `filename` and `filename*`. + # + # `filename` is defined to be a `value`, which is defined by RFC2616 + # section 3.6 [2] to be a `token` or a `quoted-string`, where a `token` + # is (essentially) a single US-ASCII word, and a `quoted-string` is a + # US-ASCII string surrounded by double-quotes, using backslash as an + # escape character. Note that %-encoding is *not* permitted. + # + # `filename*` is defined to be an `ext-value`, which is defined in + # RFC5987 section 3.2.1 [3] to be `charset "'" [ language ] "'" value-chars`, + # where `value-chars` is essentially a %-encoded string in the given charset. + # + # [1]: https://tools.ietf.org/html/rfc6266#section-4.1 + # [2]: https://tools.ietf.org/html/rfc2616#section-3.6 + # [3]: https://tools.ietf.org/html/rfc5987#section-3.2.1 + + # We avoid the quoted-string version of `filename`, because (a) synapse didn't + # correctly interpret those as of 0.99.2 and (b) they are a bit of a pain and we + # may as well just do the filename* version. + if _can_encode_filename_as_token(upload_name): + disposition = "inline; filename=%s" % (upload_name,) + else: + disposition = "inline; filename*=utf-8''%s" % (_quote(upload_name),) + + request.setHeader(b"Content-Disposition", disposition.encode("ascii")) + + # cache for at least a day. + # XXX: we might want to turn this off for data we don't want to + # recommend caching as it's sensitive or private - or at least + # select private. don't bother setting Expires as all our + # clients are smart enough to be happy with Cache-Control + request.setHeader(b"Cache-Control", b"public,max-age=86400,s-maxage=86400") + if file_size is not None: + request.setHeader(b"Content-Length", b"%d" % (file_size,)) + + # Tell web crawlers to not index, archive, or follow links in media. This + # should help to prevent things in the media repo from showing up in web + # search results. + request.setHeader(b"X-Robots-Tag", "noindex, nofollow, noarchive, noimageindex") + + +# separators as defined in RFC2616. SP and HT are handled separately. +# see _can_encode_filename_as_token. +_FILENAME_SEPARATOR_CHARS = { + "(", + ")", + "<", + ">", + "@", + ",", + ";", + ":", + "\\", + '"', + "/", + "[", + "]", + "?", + "=", + "{", + "}", +} + + +def _can_encode_filename_as_token(x: str) -> bool: + for c in x: + # from RFC2616: + # + # token = 1* + # + # separators = "(" | ")" | "<" | ">" | "@" + # | "," | ";" | ":" | "\" | <"> + # | "/" | "[" | "]" | "?" | "=" + # | "{" | "}" | SP | HT + # + # CHAR = + # + # CTL = + # + if ord(c) >= 127 or ord(c) <= 32 or c in _FILENAME_SEPARATOR_CHARS: + return False + return True + + +async def respond_with_responder( + request: SynapseRequest, + responder: "Optional[Responder]", + media_type: str, + file_size: Optional[int], + upload_name: Optional[str] = None, +) -> None: + """Responds to the request with given responder. If responder is None then + returns 404. + + Args: + request + responder + media_type: The media/content type. + file_size: Size in bytes of the media. 
If not known it should be None + upload_name: The name of the requested file, if any. + """ + if not responder: + respond_404(request) + return + + # If we have a responder we *must* use it as a context manager. + with responder: + if request._disconnected: + logger.warning( + "Not sending response to request %s, already disconnected.", request + ) + return + + logger.debug("Responding to media request with responder %s", responder) + add_file_headers(request, media_type, file_size, upload_name) + try: + await responder.write_to_consumer(request) + except Exception as e: + # The majority of the time this will be due to the client having gone + # away. Unfortunately, Twisted simply throws a generic exception at us + # in that case. + logger.warning("Failed to write to consumer: %s %s", type(e), e) + + # Unregister the producer, if it has one, so Twisted doesn't complain + if request.producer: + request.unregisterProducer() + + finish_request(request) + + +class Responder(ABC): + """Represents a response that can be streamed to the requester. + + Responder is a context manager which *must* be used, so that any resources + held can be cleaned up. + """ + + @abstractmethod + def write_to_consumer(self, consumer: IConsumer) -> Awaitable: + """Stream response into consumer + + Args: + consumer: The consumer to stream into. + + Returns: + Resolves once the response has finished being written + """ + raise NotImplementedError() + + def __enter__(self) -> None: # noqa: B027 + pass + + def __exit__( # noqa: B027 + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + pass + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class ThumbnailInfo: + """Details about a generated thumbnail.""" + + width: int + height: int + method: str + # Content type of thumbnail, e.g. image/png + type: str + # The size of the media file, in bytes. + length: Optional[int] = None + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class FileInfo: + """Details about a requested/uploaded file.""" + + # The server name where the media originated from, or None if local. + server_name: Optional[str] + # The local ID of the file. For local files this is the same as the media_id + file_id: str + # If the file is for the url preview cache + url_cache: bool = False + # Whether the file is a thumbnail or not. + thumbnail: Optional[ThumbnailInfo] = None + + # The below properties exist to maintain compatibility with third-party modules. + @property + def thumbnail_width(self) -> Optional[int]: + if not self.thumbnail: + return None + return self.thumbnail.width + + @property + def thumbnail_height(self) -> Optional[int]: + if not self.thumbnail: + return None + return self.thumbnail.height + + @property + def thumbnail_method(self) -> Optional[str]: + if not self.thumbnail: + return None + return self.thumbnail.method + + @property + def thumbnail_type(self) -> Optional[str]: + if not self.thumbnail: + return None + return self.thumbnail.type + + @property + def thumbnail_length(self) -> Optional[int]: + if not self.thumbnail: + return None + return self.thumbnail.length + + +def get_filename_from_headers(headers: Dict[bytes, List[bytes]]) -> Optional[str]: + """ + Get the filename of the downloaded file by inspecting the + Content-Disposition HTTP header. + + Args: + headers: The HTTP request headers. + + Returns: + The filename, or None. + """ + content_disposition = headers.get(b"Content-Disposition", [b""]) + + # No header, bail out. 
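`add_file_headers` above picks between the two RFC 6266 forms depending on whether the upload name survives as an RFC 2616 token. A short worked illustration, reusing `_can_encode_filename_as_token` from the new `synapse.media._base` module introduced above (the sample filenames are arbitrary):

```python
import urllib.parse

from synapse.media._base import _can_encode_filename_as_token


def content_disposition_for(upload_name: str) -> str:
    """Mirror of the header construction above, for illustration only."""
    if _can_encode_filename_as_token(upload_name):
        return "inline; filename=%s" % (upload_name,)
    return "inline; filename*=utf-8''%s" % (
        urllib.parse.quote(upload_name.encode("utf-8")),
    )


# A plain ASCII token keeps the simple form ...
assert content_disposition_for("snapshot.png") == "inline; filename=snapshot.png"
# ... while anything else (spaces, quotes, non-ASCII) gets the %-encoded form.
assert (
    content_disposition_for("résumé.pdf")
    == "inline; filename*=utf-8''r%C3%A9sum%C3%A9.pdf"
)
```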
+ if not content_disposition[0]: + return None + + _, params = _parse_header(content_disposition[0]) + + upload_name = None + + # First check if there is a valid UTF-8 filename + upload_name_utf8 = params.get(b"filename*", None) + if upload_name_utf8: + if upload_name_utf8.lower().startswith(b"utf-8''"): + upload_name_utf8 = upload_name_utf8[7:] + # We have a filename*= section. This MUST be ASCII, and any UTF-8 + # bytes are %-quoted. + try: + # Once it is decoded, we can then unquote the %-encoded + # parts strictly into a unicode string. + upload_name = urllib.parse.unquote( + upload_name_utf8.decode("ascii"), errors="strict" + ) + except UnicodeDecodeError: + # Incorrect UTF-8. + pass + + # If there isn't check for an ascii name. + if not upload_name: + upload_name_ascii = params.get(b"filename", None) + if upload_name_ascii and is_ascii(upload_name_ascii): + upload_name = upload_name_ascii.decode("ascii") + + # This may be None here, indicating we did not find a matching name. + return upload_name + + +def _parse_header(line: bytes) -> Tuple[bytes, Dict[bytes, bytes]]: + """Parse a Content-type like header. + + Cargo-culted from `cgi`, but works on bytes rather than strings. + + Args: + line: header to be parsed + + Returns: + The main content-type, followed by the parameter dictionary + """ + parts = _parseparam(b";" + line) + key = next(parts) + pdict = {} + for p in parts: + i = p.find(b"=") + if i >= 0: + name = p[:i].strip().lower() + value = p[i + 1 :].strip() + + # strip double-quotes + if len(value) >= 2 and value[0:1] == value[-1:] == b'"': + value = value[1:-1] + value = value.replace(b"\\\\", b"\\").replace(b'\\"', b'"') + pdict[name] = value + + return key, pdict + + +def _parseparam(s: bytes) -> Generator[bytes, None, None]: + """Generator which splits the input on ;, respecting double-quoted sequences + + Cargo-culted from `cgi`, but works on bytes rather than strings. 
+ + Args: + s: header to be parsed + + Returns: + The split input + """ + while s[:1] == b";": + s = s[1:] + + # look for the next ; + end = s.find(b";") + + # if there is an odd number of " marks between here and the next ;, skip to the + # next ; instead + while end > 0 and (s.count(b'"', 0, end) - s.count(b'\\"', 0, end)) % 2: + end = s.find(b";", end + 1) + + if end < 0: + end = len(s) + f = s[:end] + yield f.strip() + s = s[end:] diff --git a/synapse/rest/media/v1/filepath.py b/synapse/media/filepath.py similarity index 100% rename from synapse/rest/media/v1/filepath.py rename to synapse/media/filepath.py diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/media/media_repository.py similarity index 92% rename from synapse/rest/media/v1/media_repository.py rename to synapse/media/media_repository.py index c70e1837afd0..b81e3c2b0c60 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/media/media_repository.py @@ -32,18 +32,10 @@ RequestSendFailed, SynapseError, ) -from synapse.config._base import ConfigError from synapse.config.repository import ThumbnailRequirement -from synapse.http.server import UnrecognizedRequestResource from synapse.http.site import SynapseRequest from synapse.logging.context import defer_to_thread -from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.types import UserID -from synapse.util.async_helpers import Linearizer -from synapse.util.retryutils import NotRetryingDestination -from synapse.util.stringutils import random_string - -from ._base import ( +from synapse.media._base import ( FileInfo, Responder, ThumbnailInfo, @@ -51,15 +43,15 @@ respond_404, respond_with_responder, ) -from .config_resource import MediaConfigResource -from .download_resource import DownloadResource -from .filepath import MediaFilePaths -from .media_storage import MediaStorage -from .preview_url_resource import PreviewUrlResource -from .storage_provider import StorageProviderWrapper -from .thumbnail_resource import ThumbnailResource -from .thumbnailer import Thumbnailer, ThumbnailError -from .upload_resource import UploadResource +from synapse.media.filepath import MediaFilePaths +from synapse.media.media_storage import MediaStorage +from synapse.media.storage_provider import StorageProviderWrapper +from synapse.media.thumbnailer import Thumbnailer, ThumbnailError +from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.types import UserID +from synapse.util.async_helpers import Linearizer +from synapse.util.retryutils import NotRetryingDestination +from synapse.util.stringutils import random_string if TYPE_CHECKING: from synapse.server import HomeServer @@ -1044,69 +1036,3 @@ async def _remove_local_media_from_disk( removed_media.append(media_id) return removed_media, len(removed_media) - - -class MediaRepositoryResource(UnrecognizedRequestResource): - """File uploading and downloading. 
- - Uploads are POSTed to a resource which returns a token which is used to GET - the download:: - - => POST /_matrix/media/r0/upload HTTP/1.1 - Content-Type: - Content-Length: - - - - <= HTTP/1.1 200 OK - Content-Type: application/json - - { "content_uri": "mxc:///" } - - => GET /_matrix/media/r0/download// HTTP/1.1 - - <= HTTP/1.1 200 OK - Content-Type: - Content-Disposition: attachment;filename= - - - - Clients can get thumbnails by supplying a desired width and height and - thumbnailing method:: - - => GET /_matrix/media/r0/thumbnail/ - /?width=&height=&method= HTTP/1.1 - - <= HTTP/1.1 200 OK - Content-Type: image/jpeg or image/png - - - - The thumbnail methods are "crop" and "scale". "scale" tries to return an - image where either the width or the height is smaller than the requested - size. The client should then scale and letterbox the image if it needs to - fit within a given rectangle. "crop" tries to return an image where the - width and height are close to the requested size and the aspect matches - the requested size. The client should scale the image if it needs to fit - within a given rectangle. - """ - - def __init__(self, hs: "HomeServer"): - # If we're not configured to use it, raise if we somehow got here. - if not hs.config.media.can_load_media_repo: - raise ConfigError("Synapse is not configured to use a media repo.") - - super().__init__() - media_repo = hs.get_media_repository() - - self.putChild(b"upload", UploadResource(hs, media_repo)) - self.putChild(b"download", DownloadResource(hs, media_repo)) - self.putChild( - b"thumbnail", ThumbnailResource(hs, media_repo, media_repo.media_storage) - ) - if hs.config.media.url_preview_enabled: - self.putChild( - b"preview_url", - PreviewUrlResource(hs, media_repo, media_repo.media_storage), - ) - self.putChild(b"config", MediaConfigResource(hs)) diff --git a/synapse/media/media_storage.py b/synapse/media/media_storage.py new file mode 100644 index 000000000000..a7e22a91e115 --- /dev/null +++ b/synapse/media/media_storage.py @@ -0,0 +1,374 @@ +# Copyright 2018-2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import contextlib +import logging +import os +import shutil +from types import TracebackType +from typing import ( + IO, + TYPE_CHECKING, + Any, + Awaitable, + BinaryIO, + Callable, + Generator, + Optional, + Sequence, + Tuple, + Type, +) + +import attr + +from twisted.internet.defer import Deferred +from twisted.internet.interfaces import IConsumer +from twisted.protocols.basic import FileSender + +import synapse +from synapse.api.errors import NotFoundError +from synapse.logging.context import defer_to_thread, make_deferred_yieldable +from synapse.util import Clock +from synapse.util.file_consumer import BackgroundFileConsumer + +from ._base import FileInfo, Responder +from .filepath import MediaFilePaths + +if TYPE_CHECKING: + from synapse.media.storage_provider import StorageProvider + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +class MediaStorage: + """Responsible for storing/fetching files from local sources. + + Args: + hs + local_media_directory: Base path where we store media on disk + filepaths + storage_providers: List of StorageProvider that are used to fetch and store files. + """ + + def __init__( + self, + hs: "HomeServer", + local_media_directory: str, + filepaths: MediaFilePaths, + storage_providers: Sequence["StorageProvider"], + ): + self.hs = hs + self.reactor = hs.get_reactor() + self.local_media_directory = local_media_directory + self.filepaths = filepaths + self.storage_providers = storage_providers + self.spam_checker = hs.get_spam_checker() + self.clock = hs.get_clock() + + async def store_file(self, source: IO, file_info: FileInfo) -> str: + """Write `source` to the on disk media store, and also any other + configured storage providers + + Args: + source: A file like object that should be written + file_info: Info about the file to store + + Returns: + the file path written to in the primary media store + """ + + with self.store_into_file(file_info) as (f, fname, finish_cb): + # Write to the main repository + await self.write_to_file(source, f) + await finish_cb() + + return fname + + async def write_to_file(self, source: IO, output: IO) -> None: + """Asynchronously write the `source` to `output`.""" + await defer_to_thread(self.reactor, _write_file_synchronously, source, output) + + @contextlib.contextmanager + def store_into_file( + self, file_info: FileInfo + ) -> Generator[Tuple[BinaryIO, str, Callable[[], Awaitable[None]]], None, None]: + """Context manager used to get a file like object to write into, as + described by file_info. + + Actually yields a 3-tuple (file, fname, finish_cb), where file is a file + like object that can be written to, fname is the absolute path of file + on disk, and finish_cb is a function that returns an awaitable. + + fname can be used to read the contents from after upload, e.g. to + generate thumbnails. + + finish_cb must be called and waited on after the file has been + successfully been written to. Should not be called if there was an + error. + + Args: + file_info: Info about the file to store + + Example: + + with media_storage.store_into_file(info) as (f, fname, finish_cb): + # .. write into f ... + await finish_cb() + """ + + path = self._file_info_to_path(file_info) + fname = os.path.join(self.local_media_directory, path) + + dirname = os.path.dirname(fname) + os.makedirs(dirname, exist_ok=True) + + finished_called = [False] + + try: + with open(fname, "wb") as f: + + async def finish() -> None: + # Ensure that all writes have been flushed and close the + # file. 
+ f.flush() + f.close() + + spam_check = await self.spam_checker.check_media_file_for_spam( + ReadableFileWrapper(self.clock, fname), file_info + ) + if spam_check != synapse.module_api.NOT_SPAM: + logger.info("Blocking media due to spam checker") + # Note that we'll delete the stored media, due to the + # try/except below. The media also won't be stored in + # the DB. + # We currently ignore any additional field returned by + # the spam-check API. + raise SpamMediaException(errcode=spam_check[0]) + + for provider in self.storage_providers: + await provider.store_file(path, file_info) + + finished_called[0] = True + + yield f, fname, finish + except Exception as e: + try: + os.remove(fname) + except Exception: + pass + + raise e from None + + if not finished_called: + raise Exception("Finished callback not called") + + async def fetch_media(self, file_info: FileInfo) -> Optional[Responder]: + """Attempts to fetch media described by file_info from the local cache + and configured storage providers. + + Args: + file_info + + Returns: + Returns a Responder if the file was found, otherwise None. + """ + paths = [self._file_info_to_path(file_info)] + + # fallback for remote thumbnails with no method in the filename + if file_info.thumbnail and file_info.server_name: + paths.append( + self.filepaths.remote_media_thumbnail_rel_legacy( + server_name=file_info.server_name, + file_id=file_info.file_id, + width=file_info.thumbnail.width, + height=file_info.thumbnail.height, + content_type=file_info.thumbnail.type, + ) + ) + + for path in paths: + local_path = os.path.join(self.local_media_directory, path) + if os.path.exists(local_path): + logger.debug("responding with local file %s", local_path) + return FileResponder(open(local_path, "rb")) + logger.debug("local file %s did not exist", local_path) + + for provider in self.storage_providers: + for path in paths: + res: Any = await provider.fetch(path, file_info) + if res: + logger.debug("Streaming %s from %s", path, provider) + return res + logger.debug("%s not found on %s", path, provider) + + return None + + async def ensure_media_is_in_local_cache(self, file_info: FileInfo) -> str: + """Ensures that the given file is in the local cache. Attempts to + download it from storage providers if it isn't. + + Args: + file_info + + Returns: + Full path to local file + """ + path = self._file_info_to_path(file_info) + local_path = os.path.join(self.local_media_directory, path) + if os.path.exists(local_path): + return local_path + + # Fallback for paths without method names + # Should be removed in the future + if file_info.thumbnail and file_info.server_name: + legacy_path = self.filepaths.remote_media_thumbnail_rel_legacy( + server_name=file_info.server_name, + file_id=file_info.file_id, + width=file_info.thumbnail.width, + height=file_info.thumbnail.height, + content_type=file_info.thumbnail.type, + ) + legacy_local_path = os.path.join(self.local_media_directory, legacy_path) + if os.path.exists(legacy_local_path): + return legacy_local_path + + dirname = os.path.dirname(local_path) + os.makedirs(dirname, exist_ok=True) + + for provider in self.storage_providers: + res: Any = await provider.fetch(path, file_info) + if res: + with res: + consumer = BackgroundFileConsumer( + open(local_path, "wb"), self.reactor + ) + await res.write_to_consumer(consumer) + await consumer.wait() + return local_path + + raise NotFoundError() + + def _file_info_to_path(self, file_info: FileInfo) -> str: + """Converts file_info into a relative path. 
+ + The path is suitable for storing files under a directory, e.g. used to + store files on local FS under the base media repository directory. + """ + if file_info.url_cache: + if file_info.thumbnail: + return self.filepaths.url_cache_thumbnail_rel( + media_id=file_info.file_id, + width=file_info.thumbnail.width, + height=file_info.thumbnail.height, + content_type=file_info.thumbnail.type, + method=file_info.thumbnail.method, + ) + return self.filepaths.url_cache_filepath_rel(file_info.file_id) + + if file_info.server_name: + if file_info.thumbnail: + return self.filepaths.remote_media_thumbnail_rel( + server_name=file_info.server_name, + file_id=file_info.file_id, + width=file_info.thumbnail.width, + height=file_info.thumbnail.height, + content_type=file_info.thumbnail.type, + method=file_info.thumbnail.method, + ) + return self.filepaths.remote_media_filepath_rel( + file_info.server_name, file_info.file_id + ) + + if file_info.thumbnail: + return self.filepaths.local_media_thumbnail_rel( + media_id=file_info.file_id, + width=file_info.thumbnail.width, + height=file_info.thumbnail.height, + content_type=file_info.thumbnail.type, + method=file_info.thumbnail.method, + ) + return self.filepaths.local_media_filepath_rel(file_info.file_id) + + +def _write_file_synchronously(source: IO, dest: IO) -> None: + """Write `source` to the file like `dest` synchronously. Should be called + from a thread. + + Args: + source: A file like object that's to be written + dest: A file like object to be written to + """ + source.seek(0) # Ensure we read from the start of the file + shutil.copyfileobj(source, dest) + + +class FileResponder(Responder): + """Wraps an open file that can be sent to a request. + + Args: + open_file: A file like object to be streamed ot the client, + is closed when finished streaming. + """ + + def __init__(self, open_file: IO): + self.open_file = open_file + + def write_to_consumer(self, consumer: IConsumer) -> Deferred: + return make_deferred_yieldable( + FileSender().beginFileTransfer(self.open_file, consumer) + ) + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + self.open_file.close() + + +class SpamMediaException(NotFoundError): + """The media was blocked by a spam checker, so we simply 404 the request (in + the same way as if it was quarantined). + """ + + +@attr.s(slots=True, auto_attribs=True) +class ReadableFileWrapper: + """Wrapper that allows reading a file in chunks, yielding to the reactor, + and writing to a callback. + + This is simplified `FileSender` that takes an IO object rather than an + `IConsumer`. + """ + + CHUNK_SIZE = 2**14 + + clock: Clock + path: str + + async def write_chunks_to(self, callback: Callable[[bytes], object]) -> None: + """Reads the file in chunks and calls the callback with each chunk.""" + + with open(self.path, "rb") as file: + while True: + chunk = file.read(self.CHUNK_SIZE) + if not chunk: + break + + callback(chunk) + + # We yield to the reactor by sleeping for 0 seconds. 
+ await self.clock.sleep(0) diff --git a/synapse/rest/media/v1/oembed.py b/synapse/media/oembed.py similarity index 99% rename from synapse/rest/media/v1/oembed.py rename to synapse/media/oembed.py index 7592aa5d4767..c0eaf04be544 100644 --- a/synapse/rest/media/v1/oembed.py +++ b/synapse/media/oembed.py @@ -18,7 +18,7 @@ import attr -from synapse.rest.media.v1.preview_html import parse_html_description +from synapse.media.preview_html import parse_html_description from synapse.types import JsonDict from synapse.util import json_decoder diff --git a/synapse/rest/media/v1/preview_html.py b/synapse/media/preview_html.py similarity index 100% rename from synapse/rest/media/v1/preview_html.py rename to synapse/media/preview_html.py diff --git a/synapse/media/storage_provider.py b/synapse/media/storage_provider.py new file mode 100644 index 000000000000..1c9b71d69c77 --- /dev/null +++ b/synapse/media/storage_provider.py @@ -0,0 +1,181 @@ +# Copyright 2018-2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import abc +import logging +import os +import shutil +from typing import TYPE_CHECKING, Callable, Optional + +from synapse.config._base import Config +from synapse.logging.context import defer_to_thread, run_in_background +from synapse.util.async_helpers import maybe_awaitable + +from ._base import FileInfo, Responder +from .media_storage import FileResponder + +logger = logging.getLogger(__name__) + +if TYPE_CHECKING: + from synapse.server import HomeServer + + +class StorageProvider(metaclass=abc.ABCMeta): + """A storage provider is a service that can store uploaded media and + retrieve them. + """ + + @abc.abstractmethod + async def store_file(self, path: str, file_info: FileInfo) -> None: + """Store the file described by file_info. The actual contents can be + retrieved by reading the file in file_info.upload_path. + + Args: + path: Relative path of file in local cache + file_info: The metadata of the file. + """ + + @abc.abstractmethod + async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]: + """Attempt to fetch the file described by file_info and stream it + into writer. + + Args: + path: Relative path of file in local cache + file_info: The metadata of the file. + + Returns: + Returns a Responder if the provider has the file, otherwise returns None. + """ + + +class StorageProviderWrapper(StorageProvider): + """Wraps a storage provider and provides various config options + + Args: + backend: The storage provider to wrap. + store_local: Whether to store new local files or not. + store_synchronous: Whether to wait for file to be successfully + uploaded, or todo the upload in the background. 
+ store_remote: Whether remote media should be uploaded + """ + + def __init__( + self, + backend: StorageProvider, + store_local: bool, + store_synchronous: bool, + store_remote: bool, + ): + self.backend = backend + self.store_local = store_local + self.store_synchronous = store_synchronous + self.store_remote = store_remote + + def __str__(self) -> str: + return "StorageProviderWrapper[%s]" % (self.backend,) + + async def store_file(self, path: str, file_info: FileInfo) -> None: + if not file_info.server_name and not self.store_local: + return None + + if file_info.server_name and not self.store_remote: + return None + + if file_info.url_cache: + # The URL preview cache is short lived and not worth offloading or + # backing up. + return None + + if self.store_synchronous: + # store_file is supposed to return an Awaitable, but guard + # against improper implementations. + await maybe_awaitable(self.backend.store_file(path, file_info)) # type: ignore + else: + # TODO: Handle errors. + async def store() -> None: + try: + return await maybe_awaitable( + self.backend.store_file(path, file_info) + ) + except Exception: + logger.exception("Error storing file") + + run_in_background(store) + + async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]: + if file_info.url_cache: + # Files in the URL preview cache definitely aren't stored here, + # so avoid any potentially slow I/O or network access. + return None + + # store_file is supposed to return an Awaitable, but guard + # against improper implementations. + return await maybe_awaitable(self.backend.fetch(path, file_info)) + + +class FileStorageProviderBackend(StorageProvider): + """A storage provider that stores files in a directory on a filesystem. + + Args: + hs + config: The config returned by `parse_config`. + """ + + def __init__(self, hs: "HomeServer", config: str): + self.hs = hs + self.cache_directory = hs.config.media.media_store_path + self.base_directory = config + + def __str__(self) -> str: + return "FileStorageProviderBackend[%s]" % (self.base_directory,) + + async def store_file(self, path: str, file_info: FileInfo) -> None: + """See StorageProvider.store_file""" + + primary_fname = os.path.join(self.cache_directory, path) + backup_fname = os.path.join(self.base_directory, path) + + dirname = os.path.dirname(backup_fname) + os.makedirs(dirname, exist_ok=True) + + # mypy needs help inferring the type of the second parameter, which is generic + shutil_copyfile: Callable[[str, str], str] = shutil.copyfile + await defer_to_thread( + self.hs.get_reactor(), + shutil_copyfile, + primary_fname, + backup_fname, + ) + + async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]: + """See StorageProvider.fetch""" + + backup_fname = os.path.join(self.base_directory, path) + if os.path.isfile(backup_fname): + return FileResponder(open(backup_fname, "rb")) + + return None + + @staticmethod + def parse_config(config: dict) -> str: + """Called on startup to parse config supplied. This should parse + the config and raise if there is a problem. + + The returned value is passed into the constructor. + + In this case we only care about a single param, the directory, so let's + just pull that out. 
+ """ + return Config.ensure_directory(config["directory"]) diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/media/thumbnailer.py similarity index 99% rename from synapse/rest/media/v1/thumbnailer.py rename to synapse/media/thumbnailer.py index 9480cc576332..f909a4fb9a19 100644 --- a/synapse/rest/media/v1/thumbnailer.py +++ b/synapse/media/thumbnailer.py @@ -38,7 +38,6 @@ class ThumbnailError(Exception): class Thumbnailer: - FORMATS = {"image/jpeg": "JPEG", "image/png": "PNG"} @staticmethod diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/media/url_previewer.py similarity index 92% rename from synapse/rest/media/v1/preview_url_resource.py rename to synapse/media/url_previewer.py index d9f42b899ad1..d56850e1a3c4 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/media/url_previewer.py @@ -1,5 +1,5 @@ # Copyright 2016 OpenMarket Ltd -# Copyright 2020-2021 The Matrix.org Foundation C.I.C. +# Copyright 2020-2023 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -32,29 +32,20 @@ from synapse.api.errors import Codes, SynapseError from synapse.http.client import SimpleHttpClient -from synapse.http.server import ( - DirectServeJsonResource, - respond_with_json, - respond_with_json_bytes, -) -from synapse.http.servlet import parse_integer, parse_string -from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable, run_in_background +from synapse.media._base import FileInfo, get_filename_from_headers +from synapse.media.media_storage import MediaStorage +from synapse.media.oembed import OEmbedProvider +from synapse.media.preview_html import decode_body, parse_html_to_open_graph from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.rest.media.v1._base import get_filename_from_headers -from synapse.rest.media.v1.media_storage import MediaStorage -from synapse.rest.media.v1.oembed import OEmbedProvider -from synapse.rest.media.v1.preview_html import decode_body, parse_html_to_open_graph from synapse.types import JsonDict, UserID from synapse.util import json_encoder from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.stringutils import random_string -from ._base import FileInfo - if TYPE_CHECKING: - from synapse.rest.media.v1.media_repository import MediaRepository + from synapse.media.media_repository import MediaRepository from synapse.server import HomeServer logger = logging.getLogger(__name__) @@ -107,22 +98,10 @@ class MediaInfo: etag: Optional[str] -class PreviewUrlResource(DirectServeJsonResource): +class UrlPreviewer: """ - The `GET /_matrix/media/r0/preview_url` endpoint provides a generic preview API - for URLs which outputs Open Graph (https://ogp.me/) responses (with some Matrix - specific additions). - - This does have trade-offs compared to other designs: - - * Pros: - * Simple and flexible; can be used by any clients at any point - * Cons: - * If each homeserver provides one of these independently, all the homeservers in a - room may needlessly DoS the target URI - * The URL metadata must be stored somewhere, rather than just using Matrix - itself to store the media. - * Matrix cannot be used to distribute the metadata between homeservers. + Generates an Open Graph (https://ogp.me/) responses (with some Matrix + specific additions) for a given URL. 
When Synapse is asked to preview a URL it does the following: @@ -163,23 +142,22 @@ class PreviewUrlResource(DirectServeJsonResource): 7. Stores the result in the database cache. 4. Returns the result. + If any additional requests (e.g. from oEmbed autodiscovery, step 5.3 or + image thumbnailing, step 5.4 or 6.4) fails then the URL preview as a whole + does not fail. As much information as possible is returned. + The in-memory cache expires after 1 hour. Expired entries in the database cache (and their associated media files) are deleted every 10 seconds. The default expiration time is 1 hour from download. """ - isLeaf = True - def __init__( self, hs: "HomeServer", media_repo: "MediaRepository", media_storage: MediaStorage, ): - super().__init__() - - self.auth = hs.get_auth() self.clock = hs.get_clock() self.filepaths = media_repo.filepaths self.max_spider_size = hs.config.media.max_spider_size @@ -225,18 +203,7 @@ def __init__( self._start_expire_url_cache_data, 10 * 1000 ) - async def _async_render_OPTIONS(self, request: SynapseRequest) -> None: - request.setHeader(b"Allow", b"OPTIONS, GET") - respond_with_json(request, 200, {}, send_cors=True) - - async def _async_render_GET(self, request: SynapseRequest) -> None: - # XXX: if get_user_by_req fails, what should we do in an async render? - requester = await self.auth.get_user_by_req(request) - url = parse_string(request, "url", required=True) - ts = parse_integer(request, "ts") - if ts is None: - ts = self.clock.time_msec() - + async def preview(self, url: str, user: UserID, ts: int) -> bytes: # XXX: we could move this into _do_preview if we wanted. url_tuple = urlsplit(url) for entry in self.url_preview_url_blacklist: @@ -283,14 +250,13 @@ async def _async_render_GET(self, request: SynapseRequest) -> None: observable = self._cache.get(url) if not observable: - download = run_in_background(self._do_preview, url, requester.user, ts) + download = run_in_background(self._do_preview, url, user, ts) observable = ObservableDeferred(download, consumeErrors=True) self._cache[url] = observable else: logger.info("Returning cached response") - og = await make_deferred_yieldable(observable.observe()) - respond_with_json_bytes(request, 200, og, send_cors=True) + return await make_deferred_yieldable(observable.observe()) async def _do_preview(self, url: str, user: UserID, ts: int) -> bytes: """Check the db, and download the URL and build a preview @@ -365,16 +331,25 @@ async def _do_preview(self, url: str, user: UserID, ts: int) -> bytes: oembed_url = self._oembed.autodiscover_from_html(tree) og_from_oembed: JsonDict = {} if oembed_url: - oembed_info = await self._handle_url( - oembed_url, user, allow_data_urls=True - ) - ( - og_from_oembed, - author_name, - expiration_ms, - ) = await self._handle_oembed_response( - url, oembed_info, expiration_ms - ) + try: + oembed_info = await self._handle_url( + oembed_url, user, allow_data_urls=True + ) + except Exception as e: + # Fetching the oEmbed info failed, don't block the entire URL preview. + logger.warning( + "oEmbed fetch failed during URL preview: %s errored with %s", + oembed_url, + e, + ) + else: + ( + og_from_oembed, + author_name, + expiration_ms, + ) = await self._handle_oembed_response( + url, oembed_info, expiration_ms + ) # Parse Open Graph information from the HTML in case the oEmbed # response failed or is incomplete. 
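As a rough illustration of the refactoring above: after this change URL previewing is exposed as a plain async method, UrlPreviewer.preview(url, user, ts) -> bytes, instead of a resource that renders HTTP requests itself. A thin wrapper servlet could therefore call it along these lines. This is a sketch only; the class name and wiring here are hypothetical, and the real wrapper added elsewhere in this PR may differ.

    from synapse.http.server import DirectServeJsonResource, respond_with_json_bytes
    from synapse.http.servlet import parse_integer, parse_string
    from synapse.http.site import SynapseRequest
    from synapse.media.url_previewer import UrlPreviewer


    class PreviewUrlServletSketch(DirectServeJsonResource):
        """Hypothetical thin HTTP wrapper around UrlPreviewer (illustration only)."""

        isLeaf = True

        def __init__(self, hs, media_repo, media_storage):
            super().__init__()
            self.auth = hs.get_auth()
            self.clock = hs.get_clock()
            self.url_previewer = UrlPreviewer(hs, media_repo, media_storage)

        async def _async_render_GET(self, request: SynapseRequest) -> None:
            # Authenticate and pull the query parameters, then delegate all of
            # the caching/spidering logic to the previewer.
            requester = await self.auth.get_user_by_req(request)
            url = parse_string(request, "url", required=True)
            ts = parse_integer(request, "ts")
            if ts is None:
                ts = self.clock.time_msec()

            og = await self.url_previewer.preview(url, requester.user, ts)
            respond_with_json_bytes(request, 200, og, send_cors=True)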
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index b01372565d14..8ce58872293a 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -87,7 +87,6 @@ class LaterGauge(Collector): ] def collect(self) -> Iterable[Metric]: - g = GaugeMetricFamily(self.name, self.desc, labels=self.labels) try: diff --git a/synapse/metrics/_gc.py b/synapse/metrics/_gc.py index b7d47ce3e744..a22c4e5bbd1a 100644 --- a/synapse/metrics/_gc.py +++ b/synapse/metrics/_gc.py @@ -139,7 +139,6 @@ def _maybe_gc() -> None: class PyPyGCStats(Collector): def collect(self) -> Iterable[Metric]: - # @stats is a pretty-printer object with __str__() returning a nice table, # plus some fields that contain data from that table. # unfortunately, fields are pretty-printed themselves (i. e. '4.5MB'). diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index d22dd19d388a..ae07b13bb7ff 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -64,20 +64,15 @@ CHECK_EVENT_ALLOWED_CALLBACK, CHECK_THREEPID_CAN_BE_INVITED_CALLBACK, CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK, + ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK, ON_CREATE_ROOM_CALLBACK, ON_NEW_EVENT_CALLBACK, ON_PROFILE_UPDATE_CALLBACK, + ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK, ON_THREEPID_BIND_CALLBACK, ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK, ) from synapse.handlers.account_data import ON_ACCOUNT_DATA_UPDATED_CALLBACK -from synapse.handlers.account_validity import ( - IS_USER_EXPIRED_CALLBACK, - ON_LEGACY_ADMIN_REQUEST, - ON_LEGACY_RENEW_CALLBACK, - ON_LEGACY_SEND_MAIL_CALLBACK, - ON_USER_REGISTRATION_CALLBACK, -) from synapse.handlers.auth import ( CHECK_3PID_AUTH_CALLBACK, CHECK_AUTH_CALLBACK, @@ -103,6 +98,13 @@ run_in_background, ) from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.module_api.callbacks.account_validity_callbacks import ( + IS_USER_EXPIRED_CALLBACK, + ON_LEGACY_ADMIN_REQUEST, + ON_LEGACY_RENEW_CALLBACK, + ON_LEGACY_SEND_MAIL_CALLBACK, + ON_USER_REGISTRATION_CALLBACK, +) from synapse.rest.client.login import LoginResponse from synapse.storage import DataStore from synapse.storage.background_updates import ( @@ -248,6 +250,7 @@ def __init__(self, hs: "HomeServer", auth_handler: AuthHandler) -> None: self._push_rules_handler = hs.get_push_rules_handler() self._device_handler = hs.get_device_handler() self.custom_template_dir = hs.config.server.custom_template_directory + self._callbacks = hs.get_module_api_callbacks() try: app_name = self._hs.config.email.email_app_name @@ -269,7 +272,6 @@ def __init__(self, hs: "HomeServer", auth_handler: AuthHandler) -> None: self._account_data_manager = AccountDataManager(hs) self._spam_checker = hs.get_spam_checker() - self._account_validity_handler = hs.get_account_validity_handler() self._third_party_event_rules = hs.get_third_party_event_rules() self._password_auth_provider = hs.get_password_auth_provider() self._presence_router = hs.get_presence_router() @@ -330,7 +332,7 @@ def register_account_validity_callbacks( Added in Synapse v1.39.0. 
""" - return self._account_validity_handler.register_account_validity_callbacks( + return self._callbacks.account_validity.register_callbacks( is_user_expired=is_user_expired, on_user_registration=on_user_registration, on_legacy_send_mail=on_legacy_send_mail, @@ -357,6 +359,12 @@ def register_third_party_rules_callbacks( ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK ] = None, on_threepid_bind: Optional[ON_THREEPID_BIND_CALLBACK] = None, + on_add_user_third_party_identifier: Optional[ + ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK + ] = None, + on_remove_user_third_party_identifier: Optional[ + ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK + ] = None, ) -> None: """Registers callbacks for third party event rules capabilities. @@ -373,6 +381,8 @@ def register_third_party_rules_callbacks( on_profile_update=on_profile_update, on_user_deactivation_status_changed=on_user_deactivation_status_changed, on_threepid_bind=on_threepid_bind, + on_add_user_third_party_identifier=on_add_user_third_party_identifier, + on_remove_user_third_party_identifier=on_remove_user_third_party_identifier, ) def register_presence_router_callbacks( @@ -1576,14 +1586,41 @@ async def create_room( ) requester = create_requester(user_id) - room_id_and_alias, _ = await self._hs.get_room_creation_handler().create_room( + room_id, room_alias, _ = await self._hs.get_room_creation_handler().create_room( requester=requester, config=config, ratelimit=ratelimit, creator_join_profile=creator_join_profile, ) + room_alias_str = room_alias.to_string() if room_alias else None + return room_id, room_alias_str + + async def set_displayname( + self, + user_id: UserID, + new_displayname: str, + deactivation: bool = False, + ) -> None: + """Sets a user's display name. - return room_id_and_alias["room_id"], room_id_and_alias.get("room_alias", None) + Added in Synapse v1.76.0. + + Args: + user_id: + The user whose display name is to be changed. + new_displayname: + The new display name to give the user. + deactivation: + Whether this change was made while deactivating the user. + """ + requester = create_requester(user_id) + await self._hs.get_profile_handler().set_displayname( + target_user=user_id, + requester=requester, + new_displayname=new_displayname, + by_admin=True, + deactivation=deactivation, + ) async def set_displayname( self, diff --git a/synapse/module_api/callbacks/__init__.py b/synapse/module_api/callbacks/__init__.py new file mode 100644 index 000000000000..3d977bf6558a --- /dev/null +++ b/synapse/module_api/callbacks/__init__.py @@ -0,0 +1,22 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from synapse.module_api.callbacks.account_validity_callbacks import ( + AccountValidityModuleApiCallbacks, +) + + +class ModuleApiCallbacks: + def __init__(self) -> None: + self.account_validity = AccountValidityModuleApiCallbacks() diff --git a/synapse/module_api/callbacks/account_validity_callbacks.py b/synapse/module_api/callbacks/account_validity_callbacks.py new file mode 100644 index 000000000000..531d0c9ddcf4 --- /dev/null +++ b/synapse/module_api/callbacks/account_validity_callbacks.py @@ -0,0 +1,93 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from typing import Awaitable, Callable, List, Optional, Tuple + +from twisted.web.http import Request + +logger = logging.getLogger(__name__) + +# Types for callbacks to be registered via the module api +IS_USER_EXPIRED_CALLBACK = Callable[[str], Awaitable[Optional[bool]]] +ON_USER_REGISTRATION_CALLBACK = Callable[[str], Awaitable] +# Temporary hooks to allow for a transition from `/_matrix/client` endpoints +# to `/_synapse/client/account_validity`. See `register_callbacks` below. +ON_LEGACY_SEND_MAIL_CALLBACK = Callable[[str], Awaitable] +ON_LEGACY_RENEW_CALLBACK = Callable[[str], Awaitable[Tuple[bool, bool, int]]] +ON_LEGACY_ADMIN_REQUEST = Callable[[Request], Awaitable] + + +class AccountValidityModuleApiCallbacks: + def __init__(self) -> None: + self.is_user_expired_callbacks: List[IS_USER_EXPIRED_CALLBACK] = [] + self.on_user_registration_callbacks: List[ON_USER_REGISTRATION_CALLBACK] = [] + self.on_legacy_send_mail_callback: Optional[ON_LEGACY_SEND_MAIL_CALLBACK] = None + self.on_legacy_renew_callback: Optional[ON_LEGACY_RENEW_CALLBACK] = None + + # The legacy admin requests callback isn't a protected attribute because we need + # to access it from the admin servlet, which is outside of this handler. + self.on_legacy_admin_request_callback: Optional[ON_LEGACY_ADMIN_REQUEST] = None + + def register_callbacks( + self, + is_user_expired: Optional[IS_USER_EXPIRED_CALLBACK] = None, + on_user_registration: Optional[ON_USER_REGISTRATION_CALLBACK] = None, + on_legacy_send_mail: Optional[ON_LEGACY_SEND_MAIL_CALLBACK] = None, + on_legacy_renew: Optional[ON_LEGACY_RENEW_CALLBACK] = None, + on_legacy_admin_request: Optional[ON_LEGACY_ADMIN_REQUEST] = None, + ) -> None: + """Register callbacks from module for each hook.""" + if is_user_expired is not None: + self.is_user_expired_callbacks.append(is_user_expired) + + if on_user_registration is not None: + self.on_user_registration_callbacks.append(on_user_registration) + + # The builtin account validity feature exposes 3 endpoints (send_mail, renew, and + # an admin one). As part of moving the feature into a module, we need to change + # the path from /_matrix/client/unstable/account_validity/... 
to + # /_synapse/client/account_validity, because: + # + # * the feature isn't part of the Matrix spec thus shouldn't live under /_matrix + # * the way we register servlets means that modules can't register resources + # under /_matrix/client + # + # We need to allow for a transition period between the old and new endpoints + # in order to allow for clients to update (and for emails to be processed). + # + # Once the email-account-validity module is loaded, it will take control of account + # validity by moving the rows from our `account_validity` table into its own table. + # + # Therefore, we need to allow modules (in practice just the one implementing the + # email-based account validity) to temporarily hook into the legacy endpoints so we + # can route the traffic coming into the old endpoints into the module, which is + # why we have the following three temporary hooks. + if on_legacy_send_mail is not None: + if self.on_legacy_send_mail_callback is not None: + raise RuntimeError("Tried to register on_legacy_send_mail twice") + + self.on_legacy_send_mail_callback = on_legacy_send_mail + + if on_legacy_renew is not None: + if self.on_legacy_renew_callback is not None: + raise RuntimeError("Tried to register on_legacy_renew twice") + + self.on_legacy_renew_callback = on_legacy_renew + + if on_legacy_admin_request is not None: + if self.on_legacy_admin_request_callback is not None: + raise RuntimeError("Tried to register on_legacy_admin_request twice") + + self.on_legacy_admin_request_callback = on_legacy_admin_request diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 8ba634e69154..8e6367aa9e93 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -22,7 +22,7 @@ List, Mapping, Optional, - Set, + Sequence, Tuple, Union, ) @@ -43,6 +43,7 @@ from synapse.state import POWER_KEY from synapse.storage.databases.main.roommember import EventIdMembership from synapse.synapse_rust.push import FilteredPushRules, PushRuleEvaluator +from synapse.types import JsonValue from synapse.types.state import StateFilter from synapse.util.caches import register_cache from synapse.util.metrics import measure_func @@ -149,7 +150,7 @@ async def _get_rules_for_event( # little, we can skip fetching a huge number of push rules in large rooms. # This helps make joins and leaves faster. if event.type == EventTypes.Member: - local_users = [] + local_users: Sequence[str] = [] # We never notify a user about their own actions. This is enforced in # `_action_for_event_by_user` in the loop over `rules_by_user`, but we # do the same check here to avoid unnecessary DB queries. @@ -184,7 +185,6 @@ async def _get_rules_for_event( if event.type == EventTypes.Member and event.membership == Membership.INVITE: invited = event.state_key if invited and self.hs.is_mine_id(invited) and invited not in local_users: - local_users = list(local_users) local_users.append(invited) if not local_users: @@ -257,13 +257,15 @@ async def _get_power_levels_and_sender_level( return pl_event.content if pl_event else {}, sender_level - async def _related_events(self, event: EventBase) -> Dict[str, Dict[str, str]]: + async def _related_events( + self, event: EventBase + ) -> Dict[str, Dict[str, JsonValue]]: """Fetches the related events for 'event'. Sets the im.vector.is_falling_back key if the event is from a fallback relation Returns: Mapping of relation type to flattened events. 
""" - related_events: Dict[str, Dict[str, str]] = {} + related_events: Dict[str, Dict[str, JsonValue]] = {} if self._related_event_match_enabled: related_event_id = event.content.get("m.relates_to", {}).get("event_id") relation_type = event.content.get("m.relates_to", {}).get("rel_type") @@ -322,7 +324,6 @@ async def _action_for_event_by_user( context: EventContext, event_id_to_event: Mapping[str, EventBase], ) -> None: - if ( not event.internal_metadata.is_notifiable() or event.internal_metadata.is_historical() @@ -384,27 +385,14 @@ async def _action_for_event_by_user( del notification_levels[key] # Pull out any user and room mentions. - mentions = event.content.get(EventContentFields.MSC3952_MENTIONS) - has_mentions = self._intentional_mentions_enabled and isinstance(mentions, dict) - user_mentions: Set[str] = set() - room_mention = False - if has_mentions: - # mypy seems to have lost the type even though it must be a dict here. - assert isinstance(mentions, dict) - # Remove out any non-string items and convert to a set. - user_mentions_raw = mentions.get("user_ids") - if isinstance(user_mentions_raw, list): - user_mentions = set( - filter(lambda item: isinstance(item, str), user_mentions_raw) - ) - # Room mention is only true if the value is exactly true. - room_mention = mentions.get("room") is True + has_mentions = ( + self._intentional_mentions_enabled + and EventContentFields.MSC3952_MENTIONS in event.content + ) evaluator = PushRuleEvaluator( _flatten_dict(event), has_mentions, - user_mentions, - room_mention, non_bot_room_member_count, sender_power_level, notification_levels, @@ -497,16 +485,20 @@ async def _action_for_event_by_user( StateGroup = Union[object, int] +def _is_simple_value(value: Any) -> bool: + return isinstance(value, (bool, str)) or type(value) is int or value is None + + def _flatten_dict( d: Union[EventBase, Mapping[str, Any]], prefix: Optional[List[str]] = None, - result: Optional[Dict[str, str]] = None, -) -> Dict[str, str]: + result: Optional[Dict[str, JsonValue]] = None, +) -> Dict[str, JsonValue]: """ Given a JSON dictionary (or event) which might contain sub dictionaries, flatten it into a single layer dictionary by combining the keys & sub-keys. - Any (non-dictionary), non-string value is dropped. + String, integer, boolean, null or lists of those values are kept. All others are dropped. Transforms: @@ -529,11 +521,15 @@ def _flatten_dict( if result is None: result = {} for key, value in d.items(): - if isinstance(value, str): - result[".".join(prefix + [key])] = value.lower() - elif isinstance(value, bool): - # Beeper: we need this for fi.mau.auto_accept_invite boolean - result[".".join(prefix + [key])] = str(value).lower() + # Escape periods in the key with a backslash (and backslashes with an + # extra backslash). This is since a period is used as a separator between + # nested fields. 
+ key = key.replace("\\", "\\\\").replace(".", "\\.") + + if _is_simple_value(value): + result[".".join(prefix + [key])] = value + elif isinstance(value, (list, tuple)): + result[".".join(prefix + [key])] = [v for v in value if _is_simple_value(v)] elif isinstance(value, Mapping): # do not set `room_version` due to recursion considerations below _flatten_dict(value, prefix=(prefix + [key]), result=result) diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py index bb76c169c6ce..222afbdcc872 100644 --- a/synapse/push/clientformat.py +++ b/synapse/push/clientformat.py @@ -41,11 +41,12 @@ def format_push_rules_for_user( rulearray.append(template_rule) - pattern_type = template_rule.pop("pattern_type", None) - if pattern_type == "user_id": - template_rule["pattern"] = user.to_string() - elif pattern_type == "user_localpart": - template_rule["pattern"] = user.localpart + for type_key in ("pattern", "value"): + type_value = template_rule.pop(f"{type_key}_type", None) + if type_value == "user_id": + template_rule[type_key] = user.to_string() + elif type_value == "user_localpart": + template_rule[type_key] = user.localpart template_rule["enabled"] = enabled diff --git a/synapse/replication/http/account_data.py b/synapse/replication/http/account_data.py index 2374f810c94f..111ec07e64af 100644 --- a/synapse/replication/http/account_data.py +++ b/synapse/replication/http/account_data.py @@ -265,7 +265,6 @@ def __init__(self, hs: "HomeServer"): @staticmethod async def _serialize_payload(user_id: str, room_id: str, tag: str) -> JsonDict: # type: ignore[override] - return {} async def _handle_request( # type: ignore[override] diff --git a/synapse/replication/http/devices.py b/synapse/replication/http/devices.py index ecea6fc915c7..cc3929dcf565 100644 --- a/synapse/replication/http/devices.py +++ b/synapse/replication/http/devices.py @@ -195,7 +195,6 @@ def __init__(self, hs: "HomeServer"): async def _serialize_payload( # type: ignore[override] user_id: str, device_id: str, keys: JsonDict ) -> JsonDict: - return { "user_id": user_id, "device_id": device_id, diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py index 9fa1060d48f6..67b01db67e7f 100644 --- a/synapse/replication/http/membership.py +++ b/synapse/replication/http/membership.py @@ -142,17 +142,12 @@ async def _serialize_payload( # type: ignore[override] } async def _handle_request( # type: ignore[override] - self, - request: SynapseRequest, - content: JsonDict, - room_id: str, - user_id: str, + self, request: SynapseRequest, content: JsonDict, room_id: str, user_id: str ) -> Tuple[int, JsonDict]: remote_room_hosts = content["remote_room_hosts"] event_content = content["content"] requester = Requester.deserialize(self.store, content["requester"]) - request.requester = requester logger.debug("remote_knock: %s on room: %s", user_id, room_id) @@ -277,16 +272,12 @@ async def _serialize_payload( # type: ignore[override] } async def _handle_request( # type: ignore[override] - self, - request: SynapseRequest, - content: JsonDict, - knock_event_id: str, + self, request: SynapseRequest, content: JsonDict, knock_event_id: str ) -> Tuple[int, JsonDict]: txn_id = content["txn_id"] event_content = content["content"] requester = Requester.deserialize(self.store, content["requester"]) - request.requester = requester # hopefully we're now on the master, so this won't recurse! 
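To make the _flatten_dict change in synapse/push/bulk_push_rule_evaluator.py above more concrete, here is a small worked example (illustrative only, using made-up event content) of what the new flattening rules produce: periods in keys are escaped with a backslash, simple values (strings, booleans, integers, None) are kept as-is, lists are filtered down to their simple values, nested mappings are recursed into, and anything else is dropped.

    content = {
        "msgtype": "m.text",
        "body": "Hello",
        "m.relates_to": {"rel_type": "m.thread", "event_id": "$abc"},
        "org.example.tags": ["a", 1, {"nested": "dropped"}],
        "info": {"mimetype": None, "thumbnail_info": {"w": 32}},
    }

    _flatten_dict(content)
    # => {
    #     "msgtype": "m.text",
    #     "body": "Hello",
    #     "m\\.relates_to.rel_type": "m.thread",
    #     "m\\.relates_to.event_id": "$abc",
    #     "org\\.example\\.tags": ["a", 1],
    #     "info.mimetype": None,
    #     "info.thumbnail_info.w": 32,
    # }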
@@ -363,3 +354,5 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: ReplicationRemoteJoinRestServlet(hs).register(http_server) ReplicationRemoteRejectInviteRestServlet(hs).register(http_server) ReplicationUserJoinedLeftRoomRestServlet(hs).register(http_server) + ReplicationRemoteKnockRestServlet(hs).register(http_server) + ReplicationRemoteRescindKnockRestServlet(hs).register(http_server) diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index cc0528bd8e5f..200f667fdf27 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -18,16 +18,12 @@ from twisted.internet import defer from twisted.internet.defer import Deferred -from twisted.internet.interfaces import IAddress, IConnector -from twisted.internet.protocol import ReconnectingClientFactory -from twisted.python.failure import Failure from synapse.api.constants import EventTypes, Membership, ReceiptTypes from synapse.federation import send_queue from synapse.federation.sender import FederationSender from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.replication.tcp.protocol import ClientReplicationStreamProtocol from synapse.replication.tcp.streams import ( AccountDataStream, DeviceListsStream, @@ -53,7 +49,6 @@ from synapse.util.metrics import Measure if TYPE_CHECKING: - from synapse.replication.tcp.handler import ReplicationCommandHandler from synapse.server import HomeServer logger = logging.getLogger(__name__) @@ -62,52 +57,6 @@ _WAIT_FOR_REPLICATION_TIMEOUT_SECONDS = 5 -class DirectTcpReplicationClientFactory(ReconnectingClientFactory): - """Factory for building connections to the master. Will reconnect if the - connection is lost. - - Accepts a handler that is passed to `ClientReplicationStreamProtocol`. - """ - - initialDelay = 0.1 - maxDelay = 1 # Try at least once every N seconds - - def __init__( - self, - hs: "HomeServer", - client_name: str, - command_handler: "ReplicationCommandHandler", - ): - self.client_name = client_name - self.command_handler = command_handler - self.server_name = hs.config.server.server_name - self.hs = hs - self._clock = hs.get_clock() # As self.clock is defined in super class - - hs.get_reactor().addSystemEventTrigger("before", "shutdown", self.stopTrying) - - def startedConnecting(self, connector: IConnector) -> None: - logger.info("Connecting to replication: %r", connector.getDestination()) - - def buildProtocol(self, addr: IAddress) -> ClientReplicationStreamProtocol: - logger.info("Connected to replication: %r", addr) - return ClientReplicationStreamProtocol( - self.hs, - self.client_name, - self.server_name, - self._clock, - self.command_handler, - ) - - def clientConnectionLost(self, connector: IConnector, reason: Failure) -> None: - logger.error("Lost replication conn: %r", reason) - ReconnectingClientFactory.clientConnectionLost(self, connector, reason) - - def clientConnectionFailed(self, connector: IConnector, reason: Failure) -> None: - logger.error("Failed to connect to replication: %r", reason) - ReconnectingClientFactory.clientConnectionFailed(self, connector, reason) - - class ReplicationDataHandler: """Handles incoming stream updates from replication. @@ -370,15 +319,23 @@ async def wait_for_stream_position( # We measure here to get in flight counts and average waiting time. 
with Measure(self._clock, "repl.wait_for_stream_position"): logger.info( - "Waiting for repl stream %r to reach %s (%s)", + "Waiting for repl stream %r to reach %s (%s); currently at: %s", stream_name, position, instance_name, + current_position, ) try: await make_deferred_yieldable(deferred) except defer.TimeoutError: - logger.error("Timed out waiting for stream %s", stream_name) + logger.error( + "Timed out waiting for repl stream %r to reach %s (%s)" + "; currently at: %s", + stream_name, + position, + instance_name, + self._streams[stream_name].current_token(instance_name), + ) return logger.info( diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index 0ad31a05c8ad..46eb5c26ecff 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -627,23 +627,6 @@ def on_REMOTE_SERVER_UP( self._notifier.notify_remote_server_up(cmd.data) - # We relay to all other connections to ensure every instance gets the - # notification. - # - # When configured to use redis we'll always only have one connection and - # so this is a no-op (all instances will have already received the same - # REMOTE_SERVER_UP command). - # - # For direct TCP connections this will relay to all other connections - # connected to us. When on master this will correctly fan out to all - # other direct TCP clients and on workers there'll only be the one - # connection to master. - # - # (The logic here should also be sound if we have a mix of Redis and - # direct TCP connections so long as there is only one traffic route - # between two instances, but that is not currently supported). - self.send_command(cmd, ignore_conn=conn) - def new_connection(self, connection: IReplicationConnection) -> None: """Called when we have a new connection.""" self._connections.append(connection) @@ -691,21 +674,14 @@ def connected(self) -> bool: """ return bool(self._connections) - def send_command( - self, cmd: Command, ignore_conn: Optional[IReplicationConnection] = None - ) -> None: + def send_command(self, cmd: Command) -> None: """Send a command to all connected connections. Args: cmd - ignore_conn: If set don't send command to the given connection. - Used when relaying commands from one connection to all others. """ if self._connections: for connection in self._connections: - if connection == ignore_conn: - continue - try: connection.send_command(cmd) except Exception: diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py index fd1c0ec6afa2..dfc061eb5e38 100644 --- a/synapse/replication/tcp/redis.py +++ b/synapse/replication/tcp/redis.py @@ -328,7 +328,6 @@ def __init__( outbound_redis_connection: txredisapi.ConnectionHandler, channel_names: List[str], ): - super().__init__( hs, uuid="subscriber", diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 9d17eff71451..347467d863ec 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -238,6 +238,24 @@ async def _run_notifier_loop(self) -> None: except Exception: logger.exception("Failed to replicate") + # The last token we send may not match the current + # token, in which case we want to send out a `POSITION` + # to tell other workers the actual current position. 
+ if updates[-1][0] < current_token: + logger.info( + "Sending position: %s -> %s", + stream.NAME, + current_token, + ) + self.command_handler.send_command( + PositionCommand( + stream.NAME, + self._instance_name, + updates[-1][0], + current_token, + ) + ) + logger.debug("No more pending updates, breaking poke loop") finally: self.pending_updates = False diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py index 14b6705862ac..ad9b76071383 100644 --- a/synapse/replication/tcp/streams/events.py +++ b/synapse/replication/tcp/streams/events.py @@ -139,7 +139,6 @@ async def _update_function( current_token: Token, target_row_count: int, ) -> StreamUpdateResult: - # the events stream merges together three separate sources: # * new events # * current_state changes diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index 14c4e6ebbbee..55b448adfdba 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -20,6 +20,7 @@ account, account_data, account_validity, + appservice_ping, auth, capabilities, devices, @@ -108,8 +109,7 @@ def register_servlets(client_resource: HttpServer, hs: "HomeServer") -> None: if is_main_process: logout.register_servlets(hs, client_resource) sync.register_servlets(hs, client_resource) - if is_main_process: - filter.register_servlets(hs, client_resource) + filter.register_servlets(hs, client_resource) account.register_servlets(hs, client_resource) register.register_servlets(hs, client_resource) if is_main_process: @@ -140,7 +140,8 @@ def register_servlets(client_resource: HttpServer, hs: "HomeServer") -> None: relations.register_servlets(hs, client_resource) if is_main_process: password_policy.register_servlets(hs, client_resource) - knock.register_servlets(hs, client_resource) + knock.register_servlets(hs, client_resource) + appservice_ping.register_servlets(hs, client_resource) # moving to /_synapse/admin if is_main_process: diff --git a/synapse/rest/admin/event_reports.py b/synapse/rest/admin/event_reports.py index a3beb74e2c3d..c546ef7e2305 100644 --- a/synapse/rest/admin/event_reports.py +++ b/synapse/rest/admin/event_reports.py @@ -53,11 +53,11 @@ class EventReportsRestServlet(RestServlet): PATTERNS = admin_patterns("/event_reports$") def __init__(self, hs: "HomeServer"): - self.auth = hs.get_auth() - self.store = hs.get_datastores().main + self._auth = hs.get_auth() + self._store = hs.get_datastores().main async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - await assert_requester_is_admin(self.auth, request) + await assert_requester_is_admin(self._auth, request) start = parse_integer(request, "from", default=0) limit = parse_integer(request, "limit", default=100) @@ -79,7 +79,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: errcode=Codes.INVALID_PARAM, ) - event_reports, total = await self.store.get_event_reports_paginate( + event_reports, total = await self._store.get_event_reports_paginate( start, limit, direction, user_id, room_id ) ret = {"event_reports": event_reports, "total": total} @@ -108,13 +108,13 @@ class EventReportDetailRestServlet(RestServlet): PATTERNS = admin_patterns("/event_reports/(?P[^/]*)$") def __init__(self, hs: "HomeServer"): - self.auth = hs.get_auth() - self.store = hs.get_datastores().main + self._auth = hs.get_auth() + self._store = hs.get_datastores().main async def on_GET( self, request: SynapseRequest, report_id: str ) -> Tuple[int, JsonDict]: - await assert_requester_is_admin(self.auth, request) + await 
assert_requester_is_admin(self._auth, request) message = ( "The report_id parameter must be a string representing a positive integer." @@ -131,8 +131,33 @@ async def on_GET( HTTPStatus.BAD_REQUEST, message, errcode=Codes.INVALID_PARAM ) - ret = await self.store.get_event_report(resolved_report_id) + ret = await self._store.get_event_report(resolved_report_id) if not ret: raise NotFoundError("Event report not found") return HTTPStatus.OK, ret + + async def on_DELETE( + self, request: SynapseRequest, report_id: str + ) -> Tuple[int, JsonDict]: + await assert_requester_is_admin(self._auth, request) + + message = ( + "The report_id parameter must be a string representing a positive integer." + ) + try: + resolved_report_id = int(report_id) + except ValueError: + raise SynapseError( + HTTPStatus.BAD_REQUEST, message, errcode=Codes.INVALID_PARAM + ) + + if resolved_report_id < 0: + raise SynapseError( + HTTPStatus.BAD_REQUEST, message, errcode=Codes.INVALID_PARAM + ) + + if await self._store.delete_event_report(resolved_report_id): + return HTTPStatus.OK, {} + + raise NotFoundError("Event report not found") diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py index 0d072c42a7aa..c134ccfb3d3c 100644 --- a/synapse/rest/admin/media.py +++ b/synapse/rest/admin/media.py @@ -15,7 +15,7 @@ import logging from http import HTTPStatus -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING, Optional, Tuple from synapse.api.constants import Direction from synapse.api.errors import Codes, NotFoundError, SynapseError @@ -285,7 +285,12 @@ class DeleteMediaByDateSize(RestServlet): timestamp and size. """ - PATTERNS = admin_patterns("/media/(?P[^/]*)/delete$") + PATTERNS = [ + *admin_patterns("/media/delete$"), + # This URL kept around for legacy reasons, it is undesirable since it + # overlaps with the DeleteMediaByID servlet. + *admin_patterns("/media/(?P[^/]*)/delete$"), + ] def __init__(self, hs: "HomeServer"): self.store = hs.get_datastores().main @@ -294,7 +299,7 @@ def __init__(self, hs: "HomeServer"): self.media_repository = hs.get_media_repository() async def on_POST( - self, request: SynapseRequest, server_name: str + self, request: SynapseRequest, server_name: Optional[str] = None ) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) @@ -322,7 +327,8 @@ async def on_POST( errcode=Codes.INVALID_PARAM, ) - if self.server_name != server_name: + # This check is useless, we keep it for the legacy endpoint only. + if server_name is not None and self.server_name != server_name: raise SynapseError(HTTPStatus.BAD_REQUEST, "Can only delete local media") logging.info( @@ -489,6 +495,8 @@ def register_servlets_for_media_repo(hs: "HomeServer", http_server: HttpServer) ProtectMediaByID(hs).register(http_server) UnprotectMediaByID(hs).register(http_server) ListMediaInRoom(hs).register(http_server) - DeleteMediaByID(hs).register(http_server) + # XXX DeleteMediaByDateSize must be registered before DeleteMediaByID as + # their URL routes overlap. 
DeleteMediaByDateSize(hs).register(http_server) + DeleteMediaByID(hs).register(http_server) UserMediaRestServlet(hs).register(http_server) diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index 97d2da5dd4d2..1077b1890b13 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -83,7 +83,6 @@ def __init__(self, hs: "HomeServer"): async def on_DELETE( self, request: SynapseRequest, room_id: str ) -> Tuple[int, JsonDict]: - requester = await self._auth.get_user_by_req(request) await assert_user_is_admin(self._auth, requester) @@ -152,7 +151,6 @@ def __init__(self, hs: "HomeServer"): async def on_GET( self, request: SynapseRequest, room_id: str ) -> Tuple[int, JsonDict]: - await assert_requester_is_admin(self._auth, request) if not RoomID.is_valid(room_id): @@ -189,7 +187,6 @@ def __init__(self, hs: "HomeServer"): async def on_GET( self, request: SynapseRequest, delete_id: str ) -> Tuple[int, JsonDict]: - await assert_requester_is_admin(self._auth, request) delete_status = self._pagination_handler.get_delete_status(delete_id) @@ -452,7 +449,6 @@ async def on_GET( class JoinRoomAliasServlet(ResolveRoomIdMixin, RestServlet): - PATTERNS = admin_patterns("/join/(?P[^/]*)$") def __init__(self, hs: "HomeServer"): diff --git a/synapse/rest/admin/server_notice_servlet.py b/synapse/rest/admin/server_notice_servlet.py index 15da9cd88153..7dd1c10b91b4 100644 --- a/synapse/rest/admin/server_notice_servlet.py +++ b/synapse/rest/admin/server_notice_servlet.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. from http import HTTPStatus -from typing import TYPE_CHECKING, Awaitable, Optional, Tuple +from typing import TYPE_CHECKING, Optional, Tuple from synapse.api.constants import EventTypes from synapse.api.errors import NotFoundError, SynapseError @@ -23,10 +23,10 @@ parse_json_object_from_request, ) from synapse.http.site import SynapseRequest -from synapse.rest.admin import assert_requester_is_admin -from synapse.rest.admin._base import admin_patterns +from synapse.logging.opentracing import set_tag +from synapse.rest.admin._base import admin_patterns, assert_user_is_admin from synapse.rest.client.transactions import HttpTransactionCache -from synapse.types import JsonDict, UserID +from synapse.types import JsonDict, Requester, UserID if TYPE_CHECKING: from synapse.server import HomeServer @@ -70,10 +70,13 @@ def register(self, json_resource: HttpServer) -> None: self.__class__.__name__, ) - async def on_POST( - self, request: SynapseRequest, txn_id: Optional[str] = None + async def _do( + self, + request: SynapseRequest, + requester: Requester, + txn_id: Optional[str], ) -> Tuple[int, JsonDict]: - await assert_requester_is_admin(self.auth, request) + await assert_user_is_admin(self.auth, requester) body = parse_json_object_from_request(request) assert_params_in_dict(body, ("user_id", "content")) event_type = body.get("type", EventTypes.Message) @@ -106,9 +109,18 @@ async def on_POST( return HTTPStatus.OK, {"event_id": event.event_id} - def on_PUT( + async def on_POST( + self, + request: SynapseRequest, + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request) + return await self._do(request, requester, None) + + async def on_PUT( self, request: SynapseRequest, txn_id: str - ) -> Awaitable[Tuple[int, JsonDict]]: - return self.txns.fetch_or_execute_request( - request, self.on_POST, request, txn_id + ) -> Tuple[int, JsonDict]: + requester = await 
self.auth.get_user_by_req(request) + set_tag("txn_id", txn_id) + return await self.txns.fetch_or_execute_request( + request, requester, self._do, request, requester, txn_id ) diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 5f15b92c60c8..66c5a28145f9 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -306,13 +306,20 @@ async def on_PUT( # remove old threepids for medium, address in del_threepids: try: - await self.auth_handler.delete_threepid( - user_id, medium, address, None + # Attempt to remove any known bindings of this third-party ID + # and user ID from identity servers. + await self.hs.get_identity_handler().try_unbind_threepid( + user_id, medium, address, id_server=None ) except Exception: logger.exception("Failed to remove threepids") raise SynapseError(500, "Failed to remove threepids") + # Delete the local association of this user ID and third-party ID. + await self.auth_handler.delete_local_threepid( + user_id, medium, address + ) + # add new threepids current_time = self.hs.get_clock().time_msec() for medium, address in add_threepids: @@ -678,15 +685,18 @@ class AccountValidityRenewServlet(RestServlet): PATTERNS = admin_patterns("/account_validity/validity$") def __init__(self, hs: "HomeServer"): - self.account_activity_handler = hs.get_account_validity_handler() + self.account_validity_handler = hs.get_account_validity_handler() + self.account_validity_module_callbacks = ( + hs.get_module_api_callbacks().account_validity + ) self.auth = hs.get_auth() async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) - if self.account_activity_handler.on_legacy_admin_request_callback: - expiration_ts = await ( - self.account_activity_handler.on_legacy_admin_request_callback(request) + if self.account_validity_module_callbacks.on_legacy_admin_request_callback: + expiration_ts = await self.account_validity_module_callbacks.on_legacy_admin_request_callback( + request ) else: body = parse_json_object_from_request(request) @@ -697,7 +707,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: "Missing property 'user_id' in the request body", ) - expiration_ts = await self.account_activity_handler.renew_account_for_user( + expiration_ts = await self.account_validity_handler.renew_account_for_user( body["user_id"], body.get("expiration_ts"), not body.get("enable_renewal_emails", True), @@ -1194,7 +1204,8 @@ async def on_GET( if not await self._store.get_user_by_id(user_id): raise NotFoundError("User not found") - global_data, by_room_data = await self._store.get_account_data_for_user(user_id) + global_data = await self._store.get_global_account_data_for_user(user_id) + by_room_data = await self._store.get_room_account_data_for_user(user_id) return HTTPStatus.OK, { "account_data": { "global": global_data, diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index 4373c7366262..484d7440a442 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -415,6 +415,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: request, MsisdnRequestTokenBody ) msisdn = phone_number_to_msisdn(body.country, body.phone_number) + logger.info("Request #%s to verify ownership of %s", body.send_attempt, msisdn) if not await check_3pid_allowed(self.hs, "msisdn", msisdn): raise SynapseError( @@ -444,6 +445,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: await 
self.hs.get_clock().sleep(random.randint(1, 10) / 10) return 200, {"sid": random_string(16)} + logger.info("MSISDN %s is already in use by %s", msisdn, existing_user_id) raise SynapseError(400, "MSISDN is already in use", Codes.THREEPID_IN_USE) if not self.hs.config.registration.account_threepid_delegate_msisdn: @@ -468,6 +470,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: threepid_send_requests.labels(type="msisdn", reason="add_threepid").observe( body.send_attempt ) + logger.info("MSISDN %s: got response from identity server: %s", msisdn, ret) return 200, ret @@ -734,12 +737,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: # Attempt to unbind the threepid from an identity server. If id_server is None, try to # unbind from all identity servers this threepid has been added to in the past result = await self.identity_handler.try_unbind_threepid( - requester.user.to_string(), - { - "address": body.address, - "medium": body.medium, - "id_server": body.id_server, - }, + requester.user.to_string(), body.medium, body.address, body.id_server ) return 200, {"id_server_unbind_result": "success" if result else "no-support"} @@ -770,7 +768,9 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: user_id = requester.user.to_string() try: - ret = await self.auth_handler.delete_threepid( + # Attempt to remove any known bindings of this third-party ID + # and user ID from identity servers. + ret = await self.hs.get_identity_handler().try_unbind_threepid( user_id, body.medium, body.address, body.id_server ) except Exception: @@ -785,6 +785,11 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: else: id_server_unbind_result = "no-support" + # Delete the local association of this user ID and third-party ID. + await self.auth_handler.delete_local_threepid( + user_id, body.medium, body.address + ) + return 200, {"id_server_unbind_result": id_server_unbind_result} diff --git a/synapse/rest/client/appservice_ping.py b/synapse/rest/client/appservice_ping.py new file mode 100644 index 000000000000..31466a4ad4cb --- /dev/null +++ b/synapse/rest/client/appservice_ping.py @@ -0,0 +1,115 @@ +# Copyright 2023 Tulir Asokan +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +import time +from http import HTTPStatus +from typing import TYPE_CHECKING, Any, Dict, Tuple + +from synapse.api.errors import ( + CodeMessageException, + Codes, + HttpResponseException, + SynapseError, +) +from synapse.http import RequestTimedOutError +from synapse.http.server import HttpServer +from synapse.http.servlet import RestServlet, parse_json_object_from_request +from synapse.http.site import SynapseRequest +from synapse.types import JsonDict + +from ._base import client_patterns + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +class AppservicePingRestServlet(RestServlet): + PATTERNS = client_patterns( + "/fi.mau.msc2659/appservice/(?P[^/]*)/ping", + unstable=True, + releases=(), + ) + + def __init__(self, hs: "HomeServer"): + super().__init__() + self.as_api = hs.get_application_service_api() + self.auth = hs.get_auth() + + async def on_POST( + self, request: SynapseRequest, appservice_id: str + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request) + + if not requester.app_service: + raise SynapseError( + HTTPStatus.FORBIDDEN, + "Only application services can use the /appservice/ping endpoint", + Codes.FORBIDDEN, + ) + elif requester.app_service.id != appservice_id: + raise SynapseError( + HTTPStatus.FORBIDDEN, + "Mismatching application service ID in path", + Codes.FORBIDDEN, + ) + elif not requester.app_service.url: + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "The application service does not have a URL set", + Codes.AS_PING_URL_NOT_SET, + ) + + content = parse_json_object_from_request(request) + txn_id = content.get("transaction_id", None) + + start = time.monotonic() + try: + await self.as_api.ping(requester.app_service, txn_id) + except RequestTimedOutError as e: + raise SynapseError( + HTTPStatus.GATEWAY_TIMEOUT, + e.msg, + Codes.AS_PING_CONNECTION_TIMEOUT, + ) + except CodeMessageException as e: + additional_fields: Dict[str, Any] = {"status": e.code} + if isinstance(e, HttpResponseException): + try: + additional_fields["body"] = e.response.decode("utf-8") + except UnicodeDecodeError: + pass + raise SynapseError( + HTTPStatus.BAD_GATEWAY, + f"HTTP {e.code} {e.msg}", + Codes.AS_PING_BAD_STATUS, + additional_fields=additional_fields, + ) + except Exception as e: + raise SynapseError( + HTTPStatus.BAD_GATEWAY, + f"{type(e).__name__}: {e}", + Codes.AS_PING_CONNECTION_FAILED, + ) + + duration = time.monotonic() - start + + return HTTPStatus.OK, {"duration": int(duration * 1000)} + + +def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: + if hs.config.experimental.msc2659_enabled: + AppservicePingRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/auth.py b/synapse/rest/client/auth.py index eb77337044da..276a1b405d44 100644 --- a/synapse/rest/client/auth.py +++ b/synapse/rest/client/auth.py @@ -97,7 +97,6 @@ async def on_GET(self, request: SynapseRequest, stagetype: str) -> None: return None async def on_POST(self, request: Request, stagetype: str) -> None: - session = parse_string(request, "session") if not session: raise SynapseError(400, "No session supplied") diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py index 486c6dbbc5b3..dab4a77f7e7c 100644 --- a/synapse/rest/client/devices.py +++ b/synapse/rest/client/devices.py @@ -255,7 +255,7 @@ class DehydratedDeviceServlet(RestServlet): """ - PATTERNS = client_patterns("/org.matrix.msc2697.v2/dehydrated_device", releases=()) + PATTERNS = 
client_patterns("/org.matrix.msc2697.v2/dehydrated_device$", releases=()) def __init__(self, hs: "HomeServer"): super().__init__() diff --git a/synapse/rest/client/events.py b/synapse/rest/client/events.py index 782e7d14e867..694d77d28730 100644 --- a/synapse/rest/client/events.py +++ b/synapse/rest/client/events.py @@ -17,6 +17,7 @@ from typing import TYPE_CHECKING, Dict, List, Tuple, Union from synapse.api.errors import SynapseError +from synapse.events.utils import SerializeEventConfig from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_string from synapse.http.site import SynapseRequest @@ -43,9 +44,8 @@ def __init__(self, hs: "HomeServer"): async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) - is_guest = requester.is_guest args: Dict[bytes, List[bytes]] = request.args # type: ignore - if is_guest: + if requester.is_guest: if b"room_id" not in args: raise SynapseError(400, "Guest users must specify room_id param") room_id = parse_string(request, "room_id") @@ -63,13 +63,12 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: as_client_event = b"raw" not in args chunk = await self.event_stream_handler.get_stream( - requester.user.to_string(), + requester, pagin_config, timeout=timeout, as_client_event=as_client_event, - affect_presence=(not is_guest), + affect_presence=(not requester.is_guest), room_id=room_id, - is_guest=is_guest, ) return 200, chunk @@ -91,9 +90,12 @@ async def on_GET( requester = await self.auth.get_user_by_req(request) event = await self.event_handler.get_event(requester.user, None, event_id) - time_now = self.clock.time_msec() if event: - result = self._event_serializer.serialize_event(event, time_now) + result = self._event_serializer.serialize_event( + event, + self.clock.time_msec(), + config=SerializeEventConfig(requester=requester), + ) return 200, result else: return 404, "Event not found." diff --git a/synapse/rest/client/filter.py b/synapse/rest/client/filter.py index cc1c2f973140..236199897ce0 100644 --- a/synapse/rest/client/filter.py +++ b/synapse/rest/client/filter.py @@ -79,7 +79,6 @@ def __init__(self, hs: "HomeServer"): async def on_POST( self, request: SynapseRequest, user_id: str ) -> Tuple[int, JsonDict]: - target_user = UserID.from_string(user_id) requester = await self.auth.get_user_by_req(request) diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index 7873b363c06c..32bb8b9a91a6 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -312,15 +312,29 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: user_id = requester.user.to_string() body = parse_json_object_from_request(request) - await self.auth_handler.validate_user_via_ui_auth( - requester, - request, - body, - "add a device signing key to your account", - # Allow skipping of UI auth since this is frequently called directly - # after login and it is silly to ask users to re-auth immediately. - can_skip_ui_auth=True, - ) + if self.hs.config.experimental.msc3967_enabled: + if await self.e2e_keys_handler.is_cross_signing_set_up_for_user(user_id): + # If we already have a master key then cross signing is set up and we require UIA to reset + await self.auth_handler.validate_user_via_ui_auth( + requester, + request, + body, + "reset the device signing key on your account", + # Do not allow skipping of UIA auth. 
+ can_skip_ui_auth=False, + ) + # Otherwise we don't require UIA since we are setting up cross signing for first time + else: + # Previous behaviour is to always require UIA but allow it to be skipped + await self.auth_handler.validate_user_via_ui_auth( + requester, + request, + body, + "add a device signing key to your account", + # Allow skipping of UI auth since this is frequently called directly + # after login and it is silly to ask users to re-auth immediately. + can_skip_ui_auth=True, + ) result = await self.e2e_keys_handler.upload_signing_keys_for_user(user_id, body) return 200, result diff --git a/synapse/rest/client/knock.py b/synapse/rest/client/knock.py index ad025c8a4529..4fa66904baa1 100644 --- a/synapse/rest/client/knock.py +++ b/synapse/rest/client/knock.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING, Awaitable, Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, Dict, List, Tuple from synapse.api.constants import Membership from synapse.api.errors import SynapseError @@ -24,8 +24,6 @@ parse_strings_from_args, ) from synapse.http.site import SynapseRequest -from synapse.logging.opentracing import set_tag -from synapse.rest.client.transactions import HttpTransactionCache from synapse.types import JsonDict, RoomAlias, RoomID if TYPE_CHECKING: @@ -45,7 +43,6 @@ class KnockRoomAliasServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() - self.txns = HttpTransactionCache(hs) self.room_member_handler = hs.get_room_member_handler() self.auth = hs.get_auth() @@ -53,7 +50,6 @@ async def on_POST( self, request: SynapseRequest, room_identifier: str, - txn_id: Optional[str] = None, ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) @@ -67,7 +63,6 @@ async def on_POST( # twisted.web.server.Request.args is incorrectly defined as Optional[Any] args: Dict[bytes, List[bytes]] = request.args # type: ignore - remote_room_hosts = parse_strings_from_args( args, "server_name", required=False ) @@ -86,7 +81,6 @@ async def on_POST( target=requester.user, room_id=room_id, action=Membership.KNOCK, - txn_id=txn_id, third_party_signed=None, remote_room_hosts=remote_room_hosts, content=event_content, @@ -94,15 +88,6 @@ async def on_POST( return 200, {"room_id": room_id} - def on_PUT( - self, request: SynapseRequest, room_identifier: str, txn_id: str - ) -> Awaitable[Tuple[int, JsonDict]]: - set_tag("txn_id", txn_id) - - return self.txns.fetch_or_execute_request( - request, self.on_POST, request, room_identifier, txn_id - ) - def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: KnockRoomAliasServlet(hs).register(http_server) diff --git a/synapse/rest/client/notifications.py b/synapse/rest/client/notifications.py index 4bb44c24dba1..21b0de00b9e3 100644 --- a/synapse/rest/client/notifications.py +++ b/synapse/rest/client/notifications.py @@ -73,6 +73,12 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: next_token = None + serialize_options = SerializeEventConfig( + event_format=format_event_for_client_v2_without_room_id, + requester=requester, + ) + now = self.clock.time_msec() + for pa in push_actions: returned_pa = { "room_id": pa.room_id, @@ -82,10 +88,8 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: "event": ( self._event_serializer.serialize_event( notif_events[pa.event_id], - self.clock.time_msec(), - config=SerializeEventConfig( - 
event_format=format_event_for_client_v2_without_room_id - ), + now, + config=serialize_options, ) ), } diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index e499bfa46fac..d9c03f6902a3 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -629,10 +629,12 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: if not password_hash: raise SynapseError(400, "Missing params: password", Codes.MISSING_PARAM) - desired_username = await ( - self.password_auth_provider.get_username_for_registration( - auth_result, - params, + desired_username = ( + await ( + self.password_auth_provider.get_username_for_registration( + auth_result, + params, + ) ) ) @@ -683,9 +685,11 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: session_id ) - display_name = await ( - self.password_auth_provider.get_displayname_for_registration( - auth_result, params + display_name = ( + await ( + self.password_auth_provider.get_displayname_for_registration( + auth_result, params + ) ) ) @@ -953,7 +957,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: if hs.config.worker.worker_app is None: EmailRegisterRequestTokenRestServlet(hs).register(http_server) MsisdnRegisterRequestTokenRestServlet(hs).register(http_server) - UsernameAvailabilityRestServlet(hs).register(http_server) RegistrationSubmitTokenServlet(hs).register(http_server) + UsernameAvailabilityRestServlet(hs).register(http_server) RegistrationTokenValidityRestServlet(hs).register(http_server) RegisterRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/report_event.py b/synapse/rest/client/report_event.py index e2b410cf32a3..ac1a63ca2745 100644 --- a/synapse/rest/client/report_event.py +++ b/synapse/rest/client/report_event.py @@ -16,7 +16,7 @@ from http import HTTPStatus from typing import TYPE_CHECKING, Tuple -from synapse.api.errors import Codes, SynapseError +from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.site import SynapseRequest @@ -39,6 +39,7 @@ def __init__(self, hs: "HomeServer"): self.auth = hs.get_auth() self.clock = hs.get_clock() self.store = hs.get_datastores().main + self._event_handler = self.hs.get_event_handler() async def on_POST( self, request: SynapseRequest, room_id: str, event_id: str @@ -61,6 +62,20 @@ async def on_POST( Codes.BAD_JSON, ) + try: + event = await self._event_handler.get_event( + requester.user, room_id, event_id, show_redacted=False + ) + except AuthError: + # The event exists, but this user is not allowed to access this event. + event = None + + if event is None: + raise NotFoundError( + "Unable to report event: " + "it does not exist or you aren't able to see it." 
+ ) + await self.store.add_event_report( room_id=room_id, event_id=event_id, diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index a63a87c40e71..5715ece81965 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -37,7 +37,7 @@ UnredactedContentDeletedError, ) from synapse.api.filtering import Filter -from synapse.events.utils import format_event_for_client_v2 +from synapse.events.utils import SerializeEventConfig, format_event_for_client_v2 from synapse.http.server import HttpServer from synapse.http.servlet import ( ResolveRoomIdMixin, @@ -57,7 +57,7 @@ from synapse.rest.client._base import client_patterns from synapse.rest.client.transactions import HttpTransactionCache from synapse.streams.config import PaginationConfig -from synapse.types import JsonDict, StreamToken, ThirdPartyInstanceID, UserID +from synapse.types import JsonDict, Requester, StreamToken, ThirdPartyInstanceID, UserID from synapse.types.state import StateFilter from synapse.util import json_decoder from synapse.util.cancellation import cancellable @@ -151,20 +151,27 @@ def register(self, http_server: HttpServer) -> None: PATTERNS = "/createRoom" register_txn_path(self, PATTERNS, http_server) - def on_PUT( + async def on_PUT( self, request: SynapseRequest, txn_id: str - ) -> Awaitable[Tuple[int, JsonDict]]: + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request) set_tag("txn_id", txn_id) - return self.txns.fetch_or_execute_request(request, self.on_POST, request) + return await self.txns.fetch_or_execute_request( + request, requester, self._do, request, requester + ) async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) + return await self._do(request, requester) - info, _ = await self._room_creation_handler.create_room( + async def _do( + self, request: SynapseRequest, requester: Requester + ) -> Tuple[int, JsonDict]: + room_id, _, _ = await self._room_creation_handler.create_room( requester, self.get_room_config(request) ) - return 200, info + return 200, {"room_id": room_id} def get_room_config(self, request: Request) -> JsonDict: user_supplied_config = parse_json_object_from_request(request) @@ -172,9 +179,9 @@ def get_room_config(self, request: Request) -> JsonDict: # TODO: Needs unit testing for generic events -class RoomStateEventRestServlet(TransactionRestServlet): +class RoomStateEventRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): - super().__init__(hs) + super().__init__() self.event_creation_handler = hs.get_event_creation_handler() self.room_member_handler = hs.get_room_member_handler() self.message_handler = hs.get_message_handler() @@ -324,16 +331,16 @@ def __init__(self, hs: "HomeServer"): def register(self, http_server: HttpServer) -> None: # /rooms/$roomid/send/$event_type[/$txn_id] PATTERNS = "/rooms/(?P[^/]*)/send/(?P[^/]*)" - register_txn_path(self, PATTERNS, http_server, with_get=True) + register_txn_path(self, PATTERNS, http_server) - async def on_POST( + async def _do( self, request: SynapseRequest, + requester: Requester, room_id: str, event_type: str, - txn_id: Optional[str] = None, + txn_id: Optional[str], ) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request, allow_guest=True) content = parse_json_object_from_request(request) event_dict: JsonDict = { @@ -362,18 +369,30 @@ async def on_POST( set_tag("event_id", event_id) return 200, {"event_id": event_id} - def on_GET( - self, request: SynapseRequest, 
room_id: str, event_type: str, txn_id: str - ) -> Tuple[int, str]: - return 200, "Not implemented" + async def on_POST( + self, + request: SynapseRequest, + room_id: str, + event_type: str, + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request, allow_guest=True) + return await self._do(request, requester, room_id, event_type, None) - def on_PUT( + async def on_PUT( self, request: SynapseRequest, room_id: str, event_type: str, txn_id: str - ) -> Awaitable[Tuple[int, JsonDict]]: + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request, allow_guest=True) set_tag("txn_id", txn_id) - return self.txns.fetch_or_execute_request( - request, self.on_POST, request, room_id, event_type, txn_id + return await self.txns.fetch_or_execute_request( + request, + requester, + self._do, + request, + requester, + room_id, + event_type, + txn_id, ) @@ -389,14 +408,13 @@ def register(self, http_server: HttpServer) -> None: PATTERNS = "/join/(?P[^/]*)" register_txn_path(self, PATTERNS, http_server) - async def on_POST( + async def _do( self, request: SynapseRequest, + requester: Requester, room_identifier: str, - txn_id: Optional[str] = None, + txn_id: Optional[str], ) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request, allow_guest=True) - content = parse_json_object_from_request(request, allow_empty_body=True) # twisted.web.server.Request.args is incorrectly defined as Optional[Any] @@ -420,22 +438,31 @@ async def on_POST( return 200, {"room_id": room_id} - def on_PUT( + async def on_POST( + self, + request: SynapseRequest, + room_identifier: str, + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request, allow_guest=True) + return await self._do(request, requester, room_identifier, None) + + async def on_PUT( self, request: SynapseRequest, room_identifier: str, txn_id: str - ) -> Awaitable[Tuple[int, JsonDict]]: + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request, allow_guest=True) set_tag("txn_id", txn_id) - return self.txns.fetch_or_execute_request( - request, self.on_POST, request, room_identifier, txn_id + return await self.txns.fetch_or_execute_request( + request, requester, self._do, request, requester, room_identifier, txn_id ) # TODO: Needs unit testing -class PublicRoomListRestServlet(TransactionRestServlet): +class PublicRoomListRestServlet(RestServlet): PATTERNS = client_patterns("/publicRooms$", v1=True) def __init__(self, hs: "HomeServer"): - super().__init__(hs) + super().__init__() self.hs = hs self.auth = hs.get_auth() @@ -814,11 +841,13 @@ async def on_GET( [event], requester.user.to_string() ) - time_now = self.clock.time_msec() # per MSC2676, /rooms/{roomId}/event/{eventId}, should return the # *original* event, rather than the edited version event_dict = self._event_serializer.serialize_event( - event, time_now, bundle_aggregations=aggregations, apply_edits=False + event, + self.clock.time_msec(), + bundle_aggregations=aggregations, + config=SerializeEventConfig(requester=requester), ) return 200, event_dict @@ -863,24 +892,30 @@ async def on_GET( raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND) time_now = self.clock.time_msec() + serializer_options = SerializeEventConfig(requester=requester) results = { "events_before": self._event_serializer.serialize_events( event_context.events_before, time_now, bundle_aggregations=event_context.aggregations, + config=serializer_options, ), "event": self._event_serializer.serialize_event( 
event_context.event, time_now, bundle_aggregations=event_context.aggregations, + config=serializer_options, ), "events_after": self._event_serializer.serialize_events( event_context.events_after, time_now, bundle_aggregations=event_context.aggregations, + config=serializer_options, ), "state": self._event_serializer.serialize_events( - event_context.state, time_now + event_context.state, + time_now, + config=serializer_options, ), "start": event_context.start, "end": event_context.end, @@ -899,22 +934,25 @@ def register(self, http_server: HttpServer) -> None: PATTERNS = "/rooms/(?P[^/]*)/forget" register_txn_path(self, PATTERNS, http_server) - async def on_POST( - self, request: SynapseRequest, room_id: str, txn_id: Optional[str] = None - ) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request, allow_guest=False) - + async def _do(self, requester: Requester, room_id: str) -> Tuple[int, JsonDict]: await self.room_member_handler.forget(user=requester.user, room_id=room_id) return 200, {} - def on_PUT( + async def on_POST( + self, request: SynapseRequest, room_id: str + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request, allow_guest=False) + return await self._do(requester, room_id) + + async def on_PUT( self, request: SynapseRequest, room_id: str, txn_id: str - ) -> Awaitable[Tuple[int, JsonDict]]: + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request, allow_guest=False) set_tag("txn_id", txn_id) - return self.txns.fetch_or_execute_request( - request, self.on_POST, request, room_id, txn_id + return await self.txns.fetch_or_execute_request( + request, requester, self._do, requester, room_id ) @@ -926,22 +964,21 @@ def __init__(self, hs: "HomeServer"): self.auth = hs.get_auth() def register(self, http_server: HttpServer) -> None: - # /rooms/$roomid/[invite|join|leave] + # /rooms/$roomid/[join|invite|leave|ban|unban|kick] PATTERNS = ( "/rooms/(?P[^/]*)/" "(?Pjoin|invite|leave|ban|unban|kick)" ) register_txn_path(self, PATTERNS, http_server) - async def on_POST( + async def _do( self, request: SynapseRequest, + requester: Requester, room_id: str, membership_action: str, - txn_id: Optional[str] = None, + txn_id: Optional[str], ) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request, allow_guest=True) - if requester.is_guest and membership_action not in { Membership.JOIN, Membership.LEAVE, @@ -1006,13 +1043,30 @@ async def on_POST( return 200, return_value - def on_PUT( + async def on_POST( + self, + request: SynapseRequest, + room_id: str, + membership_action: str, + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request, allow_guest=True) + return await self._do(request, requester, room_id, membership_action, None) + + async def on_PUT( self, request: SynapseRequest, room_id: str, membership_action: str, txn_id: str - ) -> Awaitable[Tuple[int, JsonDict]]: + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request, allow_guest=True) set_tag("txn_id", txn_id) - return self.txns.fetch_or_execute_request( - request, self.on_POST, request, room_id, membership_action, txn_id + return await self.txns.fetch_or_execute_request( + request, + requester, + self._do, + request, + requester, + room_id, + membership_action, + txn_id, ) @@ -1028,14 +1082,14 @@ def register(self, http_server: HttpServer) -> None: PATTERNS = "/rooms/(?P[^/]*)/redact/(?P[^/]*)" register_txn_path(self, PATTERNS, http_server) - async def on_POST( + async def _do( self, 
request: SynapseRequest, + requester: Requester, room_id: str, event_id: str, - txn_id: Optional[str] = None, + txn_id: Optional[str], ) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request) content = parse_json_object_from_request(request) try: @@ -1086,13 +1140,23 @@ async def on_POST( set_tag("event_id", event_id) return 200, {"event_id": event_id} - def on_PUT( + async def on_POST( + self, + request: SynapseRequest, + room_id: str, + event_id: str, + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request) + return await self._do(request, requester, room_id, event_id, None) + + async def on_PUT( self, request: SynapseRequest, room_id: str, event_id: str, txn_id: str - ) -> Awaitable[Tuple[int, JsonDict]]: + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request) set_tag("txn_id", txn_id) - return self.txns.fetch_or_execute_request( - request, self.on_POST, request, room_id, event_id, txn_id + return await self.txns.fetch_or_execute_request( + request, requester, self._do, request, requester, room_id, event_id, txn_id ) @@ -1192,7 +1256,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: content = parse_json_object_from_request(request) batch = parse_string(request, "next_batch") - results = await self.search_handler.search(requester.user, content, batch) + results = await self.search_handler.search(requester, content, batch) return 200, results @@ -1216,7 +1280,6 @@ def register_txn_path( servlet: RestServlet, regex_string: str, http_server: HttpServer, - with_get: bool = False, ) -> None: """Registers a transaction-based path. @@ -1228,7 +1291,6 @@ def register_txn_path( regex_string: The regex string to register. Must NOT have a trailing $ as this string will be appended to. http_server: The http_server to register paths with. - with_get: True to also register respective GET paths for the PUTs. 
""" on_POST = getattr(servlet, "on_POST", None) on_PUT = getattr(servlet, "on_PUT", None) @@ -1246,18 +1308,6 @@ def register_txn_path( on_PUT, servlet.__class__.__name__, ) - on_GET = getattr(servlet, "on_GET", None) - if with_get: - if on_GET is None: - raise RuntimeError( - "register_txn_path called with with_get = True, but no on_GET method exists" - ) - http_server.register_paths( - "GET", - client_patterns(regex_string + "/(?P[^/]*)$", v1=True), - on_GET, - servlet.__class__.__name__, - ) class TimestampLookupRestServlet(RestServlet): diff --git a/synapse/rest/client/room_batch.py b/synapse/rest/client/room_batch.py index 2739005ae633..42c95f584f09 100644 --- a/synapse/rest/client/room_batch.py +++ b/synapse/rest/client/room_batch.py @@ -15,9 +15,7 @@ import logging import re from http import HTTPStatus -from typing import TYPE_CHECKING, Awaitable, Optional, Tuple - -from twisted.web.server import Request +from typing import TYPE_CHECKING, Optional, Tuple from synapse.api.constants import EventContentFields from synapse.api.errors import AuthError, Codes, SynapseError @@ -31,7 +29,6 @@ parse_strings_from_args, ) from synapse.http.site import SynapseRequest -from synapse.rest.client.transactions import HttpTransactionCache from synapse.types import JsonDict if TYPE_CHECKING: @@ -80,8 +77,6 @@ def __init__(self, hs: "HomeServer"): self.event_creation_handler = hs.get_event_creation_handler() self.auth = hs.get_auth() self.room_batch_handler = hs.get_room_batch_handler() - self.txns = HttpTransactionCache(hs) - self.enable_also_allow_user = hs.config.experimental.msc2716_also_allow_user async def on_POST( self, request: SynapseRequest, room_id: str @@ -274,16 +269,6 @@ async def on_POST( return HTTPStatus.OK, response_dict - def on_GET(self, request: Request, room_id: str) -> Tuple[int, str]: - return HTTPStatus.NOT_IMPLEMENTED, "Not implemented" - - def on_PUT( - self, request: SynapseRequest, room_id: str - ) -> Awaitable[Tuple[int, JsonDict]]: - return self.txns.fetch_or_execute_request( - request, self.on_POST, request, room_id - ) - def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: msc2716_enabled = hs.config.experimental.msc2716_enabled diff --git a/synapse/rest/client/room_keys.py b/synapse/rest/client/room_keys.py index f7081f638e51..4e7ffdb5551e 100644 --- a/synapse/rest/client/room_keys.py +++ b/synapse/rest/client/room_keys.py @@ -259,6 +259,32 @@ def __init__(self, hs: "HomeServer"): self.auth = hs.get_auth() self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler() + async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + """ + Retrieve the version information about the most current backup version (if any) + + It takes out an exclusive lock on this user's room_key backups, to ensure + clients only upload to the current backup. + + Returns 404 if the given version does not exist. 
+ + GET /room_keys/version HTTP/1.1 + { + "version": "12345", + "algorithm": "m.megolm_backup.v1", + "auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K" + } + """ + requester = await self.auth.get_user_by_req(request, allow_guest=False) + user_id = requester.user.to_string() + + try: + info = await self.e2e_room_keys_handler.get_version_info(user_id) + except SynapseError as e: + if e.code == 404: + raise SynapseError(404, "No backup found", Codes.NOT_FOUND) + return 200, info + async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: """ Create a new backup version for this user's room_keys with the given @@ -301,7 +327,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: class RoomKeysVersionServlet(RestServlet): - PATTERNS = client_patterns("/room_keys/version(/(?P[^/]+))?$") + PATTERNS = client_patterns("/room_keys/version/(?P[^/]+)$") def __init__(self, hs: "HomeServer"): super().__init__() @@ -309,12 +335,11 @@ def __init__(self, hs: "HomeServer"): self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler() async def on_GET( - self, request: SynapseRequest, version: Optional[str] + self, request: SynapseRequest, version: str ) -> Tuple[int, JsonDict]: """ Retrieve the version information about a given version of the user's - room_keys backup. If the version part is missing, returns info about the - most current backup version (if any) + room_keys backup. It takes out an exclusive lock on this user's room_key backups, to ensure clients only upload to the current backup. @@ -339,20 +364,16 @@ async def on_GET( return 200, info async def on_DELETE( - self, request: SynapseRequest, version: Optional[str] + self, request: SynapseRequest, version: str ) -> Tuple[int, JsonDict]: """ Delete the information about a given version of the user's - room_keys backup. If the version part is missing, deletes the most - current backup version (if any). Doesn't delete the actual room data. + room_keys backup. Doesn't delete the actual room data. DELETE /room_keys/version/12345 HTTP/1.1 HTTP/1.1 200 OK {} """ - if version is None: - raise SynapseError(400, "No version specified to delete", Codes.NOT_FOUND) - requester = await self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() @@ -360,7 +381,7 @@ async def on_DELETE( return 200, {} async def on_PUT( - self, request: SynapseRequest, version: Optional[str] + self, request: SynapseRequest, version: str ) -> Tuple[int, JsonDict]: """ Update the information about a given version of the user's room_keys backup. @@ -386,11 +407,6 @@ async def on_PUT( user_id = requester.user.to_string() info = parse_json_object_from_request(request) - if version is None: - raise SynapseError( - 400, "No version specified to update", Codes.MISSING_PARAM - ) - await self.e2e_room_keys_handler.update_version(user_id, version, info) return 200, {} diff --git a/synapse/rest/client/sendtodevice.py b/synapse/rest/client/sendtodevice.py index 55d52f0b2830..110af6df473f 100644 --- a/synapse/rest/client/sendtodevice.py +++ b/synapse/rest/client/sendtodevice.py @@ -13,7 +13,7 @@ # limitations under the License. 
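To summarise the room_keys hunks above: the previously optional {version} path segment is split across two servlets. GET /room_keys/version (no version) is now answered by the servlet that also handles POST, returning information about the most recent backup (404 if none exists), while /room_keys/version/{version} requires an explicit version for GET, PUT and DELETE, so the None-version guards are removed. Roughly:

    => GET /room_keys/version HTTP/1.1            (current backup; 404 if no backup exists)
    => GET /room_keys/version/12345 HTTP/1.1      (a specific version, as in the docstring above)
    => DELETE /room_keys/version/12345 HTTP/1.1   (the version is now mandatory)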
import logging -from typing import TYPE_CHECKING, Awaitable, Tuple +from typing import TYPE_CHECKING, Tuple from synapse.http import servlet from synapse.http.server import HttpServer @@ -21,7 +21,7 @@ from synapse.http.site import SynapseRequest from synapse.logging.opentracing import set_tag from synapse.rest.client.transactions import HttpTransactionCache -from synapse.types import JsonDict +from synapse.types import JsonDict, Requester from ._base import client_patterns @@ -43,19 +43,26 @@ def __init__(self, hs: "HomeServer"): self.txns = HttpTransactionCache(hs) self.device_message_handler = hs.get_device_message_handler() - def on_PUT( + async def on_PUT( self, request: SynapseRequest, message_type: str, txn_id: str - ) -> Awaitable[Tuple[int, JsonDict]]: + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request, allow_guest=True) set_tag("txn_id", txn_id) - return self.txns.fetch_or_execute_request( - request, self._put, request, message_type, txn_id + return await self.txns.fetch_or_execute_request( + request, + requester, + self._put, + request, + requester, + message_type, ) async def _put( - self, request: SynapseRequest, message_type: str, txn_id: str + self, + request: SynapseRequest, + requester: Requester, + message_type: str, ) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request, allow_guest=True) - content = parse_json_object_from_request(request) assert_params_in_dict(content, ("messages",)) diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index b3532d567aef..306edae54774 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -16,7 +16,7 @@ from collections import defaultdict from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union -from synapse.api.constants import EduTypes, Membership, PresenceState +from synapse.api.constants import AccountDataTypes, EduTypes, Membership, PresenceState from synapse.api.errors import Codes, StoreError, SynapseError from synapse.api.filtering import FilterCollection from synapse.api.presence import UserPresenceState @@ -38,7 +38,7 @@ from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string from synapse.http.site import SynapseRequest from synapse.logging.opentracing import trace_with_opname -from synapse.types import JsonDict, StreamToken +from synapse.types import JsonDict, Requester, StreamToken from synapse.util import json_decoder from ._base import client_patterns, set_timeline_upper_limit @@ -141,6 +141,19 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: beeper_previews, ) + # Stream position of the last ignored users account data event for this user, + # if we're initial syncing. + # We include this in the request key to invalidate an initial sync + # in the response cache once the set of ignored users has changed. + # (We filter out ignored users from timeline events, so our sync response + # is invalid once the set of ignored users changes.) + last_ignore_accdata_streampos: Optional[int] = None + if not since: + # No `since`, so this is an initial sync. 
+ last_ignore_accdata_streampos = await self.store.get_latest_stream_id_for_global_account_data_by_type_for_user( + user.to_string(), AccountDataTypes.IGNORED_USER_LIST + ) + request_key = ( user, timeout, @@ -148,6 +161,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: filter_id, full_state, device_id, + last_ignore_accdata_streampos, beeper_previews, ) @@ -222,7 +236,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: # We know that the the requester has an access token since appservices # cannot use sync. response_content = await self.encode_response( - time_now, sync_result, requester.access_token_id, filter_collection + time_now, sync_result, requester, filter_collection ) logger.debug("Event formatting complete") @@ -233,7 +247,7 @@ async def encode_response( self, time_now: int, sync_result: SyncResult, - access_token_id: Optional[int], + requester: Requester, filter: FilterCollection, ) -> JsonDict: logger.debug("Formatting events in sync response") @@ -246,12 +260,12 @@ async def encode_response( serialize_options = SerializeEventConfig( event_format=event_formatter, - token_id=access_token_id, + requester=requester, only_event_fields=filter.event_fields, ) stripped_serialize_options = SerializeEventConfig( event_format=event_formatter, - token_id=access_token_id, + requester=requester, include_stripped_room_state=True, ) diff --git a/synapse/rest/client/tags.py b/synapse/rest/client/tags.py index ca638755c7ee..dde08417a407 100644 --- a/synapse/rest/client/tags.py +++ b/synapse/rest/client/tags.py @@ -34,7 +34,9 @@ class TagListServlet(RestServlet): GET /user/{user_id}/rooms/{room_id}/tags HTTP/1.1 """ - PATTERNS = client_patterns("/user/(?P[^/]*)/rooms/(?P[^/]*)/tags") + PATTERNS = client_patterns( + "/user/(?P[^/]*)/rooms/(?P[^/]*)/tags$" + ) def __init__(self, hs: "HomeServer"): super().__init__() diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py index 3f40f1874a8b..f2aaab622740 100644 --- a/synapse/rest/client/transactions.py +++ b/synapse/rest/client/transactions.py @@ -15,16 +15,16 @@ """This module contains logic for storing HTTP PUT transactions. This is used to ensure idempotency when performing PUTs using the REST API.""" import logging -from typing import TYPE_CHECKING, Awaitable, Callable, Dict, Tuple +from typing import TYPE_CHECKING, Awaitable, Callable, Dict, Hashable, Tuple from typing_extensions import ParamSpec from twisted.internet.defer import Deferred from twisted.python.failure import Failure -from twisted.web.server import Request +from twisted.web.iweb import IRequest from synapse.logging.context import make_deferred_yieldable, run_in_background -from synapse.types import JsonDict +from synapse.types import JsonDict, Requester from synapse.util.async_helpers import ObservableDeferred if TYPE_CHECKING: @@ -41,53 +41,47 @@ class HttpTransactionCache: def __init__(self, hs: "HomeServer"): self.hs = hs - self.auth = self.hs.get_auth() self.clock = self.hs.get_clock() # $txn_key: (ObservableDeferred<(res_code, res_json_body)>, timestamp) self.transactions: Dict[ - str, Tuple[ObservableDeferred[Tuple[int, JsonDict]], int] + Hashable, Tuple[ObservableDeferred[Tuple[int, JsonDict]], int] ] = {} # Try to clean entries every 30 mins. This means entries will exist # for at *LEAST* 30 mins, and at *MOST* 60 mins. 
self.cleaner = self.clock.looping_call(self._cleanup, CLEANUP_PERIOD_MS) - def _get_transaction_key(self, request: Request) -> str: + def _get_transaction_key(self, request: IRequest, requester: Requester) -> Hashable: """A helper function which returns a transaction key that can be used with TransactionCache for idempotent requests. Idempotency is based on the returned key being the same for separate requests to the same endpoint. The key is formed from the HTTP request - path and the access_token for the requesting user. + path and attributes from the requester: the access_token_id for regular users, + the user ID for guest users, and the appservice ID for appservice users. Args: - request: The incoming request. Must contain an access_token. + request: The incoming request. + requester: The requester doing the request. Returns: A transaction key """ assert request.path is not None - token = self.auth.get_access_token_from_request(request) - return request.path.decode("utf8") + "/" + token + path: str = request.path.decode("utf8") + if requester.is_guest: + assert requester.user is not None, "Guest requester must have a user ID set" + return (path, "guest", requester.user) + elif requester.app_service is not None: + return (path, "appservice", requester.app_service.id) + else: + assert ( + requester.access_token_id is not None + ), "Requester must have an access_token_id" + return (path, "user", requester.access_token_id) def fetch_or_execute_request( self, - request: Request, - fn: Callable[P, Awaitable[Tuple[int, JsonDict]]], - *args: P.args, - **kwargs: P.kwargs, - ) -> Awaitable[Tuple[int, JsonDict]]: - """A helper function for fetch_or_execute which extracts - a transaction key from the given request. - - See: - fetch_or_execute - """ - return self.fetch_or_execute( - self._get_transaction_key(request), fn, *args, **kwargs - ) - - def fetch_or_execute( - self, - txn_key: str, + request: IRequest, + requester: Requester, fn: Callable[P, Awaitable[Tuple[int, JsonDict]]], *args: P.args, **kwargs: P.kwargs, @@ -96,14 +90,15 @@ def fetch_or_execute( to produce a response for this transaction. Args: - txn_key: A key to ensure idempotency should fetch_or_execute be - called again at a later point in time. + request: + requester: fn: A function which returns a tuple of (response_code, response_dict). *args: Arguments to pass to fn. **kwargs: Keyword arguments to pass to fn. Returns: Deferred which resolves to a tuple of (response_code, response_dict). 
""" + txn_key = self._get_transaction_key(request, requester) if txn_key in self.transactions: observable = self.transactions[txn_key][0] else: diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index e19c0946c035..ec171582b3d6 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -109,6 +109,8 @@ def on_GET(self, request: Request) -> Tuple[int, JsonDict]: "org.matrix.msc3773": self.config.experimental.msc3773_enabled, # Allows moderators to fetch redacted event content as described in MSC2815 "fi.mau.msc2815": self.config.experimental.msc2815_enabled, + # Adds a ping endpoint for appservices to check HS->AS connection + "fi.mau.msc2659": self.config.experimental.msc2659_enabled, # Adds support for login token requests as per MSC3882 "org.matrix.msc3882": self.config.experimental.msc3882_enabled, # Adds support for remotely enabling/disabling pushers, as per MSC3881 @@ -120,6 +122,8 @@ def on_GET(self, request: Request) -> Tuple[int, JsonDict]: is not None, # Adds support for relation-based redactions as per MSC3912. "org.matrix.msc3912": self.config.experimental.msc3912_enabled, + # Adds support for unstable "intentional mentions" behaviour. + "org.matrix.msc3952_intentional_mentions": self.config.experimental.msc3952_intentional_mentions, }, }, ) diff --git a/synapse/rest/media/v1/config_resource.py b/synapse/rest/media/config_resource.py similarity index 100% rename from synapse/rest/media/v1/config_resource.py rename to synapse/rest/media/config_resource.py diff --git a/synapse/rest/media/v1/download_resource.py b/synapse/rest/media/download_resource.py similarity index 95% rename from synapse/rest/media/v1/download_resource.py rename to synapse/rest/media/download_resource.py index 048a042692c3..8f270cf4ccb8 100644 --- a/synapse/rest/media/v1/download_resource.py +++ b/synapse/rest/media/download_resource.py @@ -22,11 +22,10 @@ ) from synapse.http.servlet import parse_boolean from synapse.http.site import SynapseRequest - -from ._base import parse_media_id, respond_404 +from synapse.media._base import parse_media_id, respond_404 if TYPE_CHECKING: - from synapse.rest.media.v1.media_repository import MediaRepository + from synapse.media.media_repository import MediaRepository from synapse.server import HomeServer logger = logging.getLogger(__name__) diff --git a/synapse/rest/media/media_repository_resource.py b/synapse/rest/media/media_repository_resource.py new file mode 100644 index 000000000000..5ebaa3b032cd --- /dev/null +++ b/synapse/rest/media/media_repository_resource.py @@ -0,0 +1,93 @@ +# Copyright 2014-2016 OpenMarket Ltd +# Copyright 2018-2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import TYPE_CHECKING + +from synapse.config._base import ConfigError +from synapse.http.server import UnrecognizedRequestResource + +from .config_resource import MediaConfigResource +from .download_resource import DownloadResource +from .preview_url_resource import PreviewUrlResource +from .thumbnail_resource import ThumbnailResource +from .upload_resource import UploadResource + +if TYPE_CHECKING: + from synapse.server import HomeServer + + +class MediaRepositoryResource(UnrecognizedRequestResource): + """File uploading and downloading. + + Uploads are POSTed to a resource which returns a token which is used to GET + the download:: + + => POST /_matrix/media/r0/upload HTTP/1.1 + Content-Type: + Content-Length: + + + + <= HTTP/1.1 200 OK + Content-Type: application/json + + { "content_uri": "mxc:///" } + + => GET /_matrix/media/r0/download// HTTP/1.1 + + <= HTTP/1.1 200 OK + Content-Type: + Content-Disposition: attachment;filename= + + + + Clients can get thumbnails by supplying a desired width and height and + thumbnailing method:: + + => GET /_matrix/media/r0/thumbnail/ + /?width=&height=&method= HTTP/1.1 + + <= HTTP/1.1 200 OK + Content-Type: image/jpeg or image/png + + + + The thumbnail methods are "crop" and "scale". "scale" tries to return an + image where either the width or the height is smaller than the requested + size. The client should then scale and letterbox the image if it needs to + fit within a given rectangle. "crop" tries to return an image where the + width and height are close to the requested size and the aspect matches + the requested size. The client should scale the image if it needs to fit + within a given rectangle. + """ + + def __init__(self, hs: "HomeServer"): + # If we're not configured to use it, raise if we somehow got here. + if not hs.config.media.can_load_media_repo: + raise ConfigError("Synapse is not configured to use a media repo.") + + super().__init__() + media_repo = hs.get_media_repository() + + self.putChild(b"upload", UploadResource(hs, media_repo)) + self.putChild(b"download", DownloadResource(hs, media_repo)) + self.putChild( + b"thumbnail", ThumbnailResource(hs, media_repo, media_repo.media_storage) + ) + if hs.config.media.url_preview_enabled: + self.putChild( + b"preview_url", + PreviewUrlResource(hs, media_repo, media_repo.media_storage), + ) + self.putChild(b"config", MediaConfigResource(hs)) diff --git a/synapse/rest/media/preview_url_resource.py b/synapse/rest/media/preview_url_resource.py new file mode 100644 index 000000000000..58513c4be43f --- /dev/null +++ b/synapse/rest/media/preview_url_resource.py @@ -0,0 +1,81 @@ +# Copyright 2016 OpenMarket Ltd +# Copyright 2020-2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import TYPE_CHECKING + +from synapse.http.server import ( + DirectServeJsonResource, + respond_with_json, + respond_with_json_bytes, +) +from synapse.http.servlet import parse_integer, parse_string +from synapse.http.site import SynapseRequest +from synapse.media.media_storage import MediaStorage +from synapse.media.url_previewer import UrlPreviewer + +if TYPE_CHECKING: + from synapse.media.media_repository import MediaRepository + from synapse.server import HomeServer + + +class PreviewUrlResource(DirectServeJsonResource): + """ + The `GET /_matrix/media/r0/preview_url` endpoint provides a generic preview API + for URLs which outputs Open Graph (https://ogp.me/) responses (with some Matrix + specific additions). + + This does have trade-offs compared to other designs: + + * Pros: + * Simple and flexible; can be used by any clients at any point + * Cons: + * If each homeserver provides one of these independently, all the homeservers in a + room may needlessly DoS the target URI + * The URL metadata must be stored somewhere, rather than just using Matrix + itself to store the media. + * Matrix cannot be used to distribute the metadata between homeservers. + """ + + isLeaf = True + + def __init__( + self, + hs: "HomeServer", + media_repo: "MediaRepository", + media_storage: MediaStorage, + ): + super().__init__() + + self.auth = hs.get_auth() + self.clock = hs.get_clock() + self.media_repo = media_repo + self.media_storage = media_storage + + self._url_previewer = UrlPreviewer(hs, media_repo, media_storage) + + async def _async_render_OPTIONS(self, request: SynapseRequest) -> None: + request.setHeader(b"Allow", b"OPTIONS, GET") + respond_with_json(request, 200, {}, send_cors=True) + + async def _async_render_GET(self, request: SynapseRequest) -> None: + # XXX: if get_user_by_req fails, what should we do in an async render? 
+ requester = await self.auth.get_user_by_req(request) + url = parse_string(request, "url", required=True) + ts = parse_integer(request, "ts") + if ts is None: + ts = self.clock.time_msec() + + og = await self._url_previewer.preview(url, requester.user, ts) + respond_with_json_bytes(request, 200, og, send_cors=True) diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/thumbnail_resource.py similarity index 98% rename from synapse/rest/media/v1/thumbnail_resource.py rename to synapse/rest/media/thumbnail_resource.py index 5f725c76009f..4ee2a0dbda79 100644 --- a/synapse/rest/media/v1/thumbnail_resource.py +++ b/synapse/rest/media/thumbnail_resource.py @@ -27,9 +27,7 @@ ) from synapse.http.servlet import parse_integer, parse_string from synapse.http.site import SynapseRequest -from synapse.rest.media.v1.media_storage import MediaStorage - -from ._base import ( +from synapse.media._base import ( FileInfo, ThumbnailInfo, parse_media_id, @@ -37,9 +35,10 @@ respond_with_file, respond_with_responder, ) +from synapse.media.media_storage import MediaStorage if TYPE_CHECKING: - from synapse.rest.media.v1.media_repository import MediaRepository + from synapse.media.media_repository import MediaRepository from synapse.server import HomeServer logger = logging.getLogger(__name__) @@ -69,7 +68,8 @@ async def _async_render_GET(self, request: SynapseRequest) -> None: width = parse_integer(request, "width", required=True) height = parse_integer(request, "height", required=True) method = parse_string(request, "method", "scale") - m_type = parse_string(request, "type", "image/png") + # TODO Parse the Accept header to get an prioritised list of thumbnail types. + m_type = "image/png" if server_name == self.server_name: if self.dynamic_thumbnails: diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/upload_resource.py similarity index 96% rename from synapse/rest/media/v1/upload_resource.py rename to synapse/rest/media/upload_resource.py index 97548b54e504..697348613b52 100644 --- a/synapse/rest/media/v1/upload_resource.py +++ b/synapse/rest/media/upload_resource.py @@ -20,10 +20,10 @@ from synapse.http.server import DirectServeJsonResource, respond_with_json from synapse.http.servlet import parse_bytes_from_args from synapse.http.site import SynapseRequest -from synapse.rest.media.v1.media_storage import SpamMediaException +from synapse.media.media_storage import SpamMediaException if TYPE_CHECKING: - from synapse.rest.media.v1.media_repository import MediaRepository + from synapse.media.media_repository import MediaRepository from synapse.server import HomeServer logger = logging.getLogger(__name__) diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py index d30878f70496..88427a573708 100644 --- a/synapse/rest/media/v1/_base.py +++ b/synapse/rest/media/v1/_base.py @@ -1,5 +1,4 @@ -# Copyright 2014-2016 OpenMarket Ltd -# Copyright 2019-2021 The Matrix.org Foundation C.I.C. +# Copyright 2023 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,466 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
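The thumbnail_resource.py hunk above stops reading the thumbnail type from a ?type= query parameter and hard-codes "image/png", leaving a TODO to negotiate via the Accept header instead. Purely as a hypothetical sketch of what that negotiation could look like (this is not part of the change, and the helper name is invented):

    # Hypothetical sketch only: choose a thumbnail type from an Accept header.
    # The diff itself simply hard-codes "image/png".
    def pick_thumbnail_type(accept_header: str, available=("image/png", "image/jpeg")) -> str:
        # Very rough parse: take media ranges in order of appearance, ignoring q-values.
        for part in accept_header.split(","):
            media_range = part.split(";")[0].strip().lower()
            if media_range in available:
                return media_range
            if media_range in ("image/*", "*/*"):
                return available[0]
        return "image/png"  # fall back to the current hard-coded default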
+# -import logging -import os -import urllib -from types import TracebackType -from typing import Awaitable, Dict, Generator, List, Optional, Tuple, Type - -import attr - -from twisted.internet.interfaces import IConsumer -from twisted.protocols.basic import FileSender -from twisted.web.server import Request - -from synapse.api.errors import Codes, SynapseError, cs_error -from synapse.http.server import finish_request, respond_with_json -from synapse.http.site import SynapseRequest -from synapse.logging.context import make_deferred_yieldable -from synapse.util.stringutils import is_ascii, parse_and_validate_server_name - -logger = logging.getLogger(__name__) - -# list all text content types that will have the charset default to UTF-8 when -# none is given -TEXT_CONTENT_TYPES = [ - "text/css", - "text/csv", - "text/html", - "text/calendar", - "text/plain", - "text/javascript", - "application/json", - "application/ld+json", - "application/rtf", - "image/svg+xml", - "text/xml", -] - - -def parse_media_id(request: Request) -> Tuple[str, str, Optional[str]]: - """Parses the server name, media ID and optional file name from the request URI - - Also performs some rough validation on the server name. - - Args: - request: The `Request`. - - Returns: - A tuple containing the parsed server name, media ID and optional file name. - - Raises: - SynapseError(404): if parsing or validation fail for any reason - """ - try: - # The type on postpath seems incorrect in Twisted 21.2.0. - postpath: List[bytes] = request.postpath # type: ignore - assert postpath - - # This allows users to append e.g. /test.png to the URL. Useful for - # clients that parse the URL to see content type. - server_name_bytes, media_id_bytes = postpath[:2] - server_name = server_name_bytes.decode("utf-8") - media_id = media_id_bytes.decode("utf8") - - # Validate the server name, raising if invalid - parse_and_validate_server_name(server_name) - - file_name = None - if len(postpath) > 2: - try: - file_name = urllib.parse.unquote(postpath[-1].decode("utf-8")) - except UnicodeDecodeError: - pass - return server_name, media_id, file_name - except Exception: - raise SynapseError( - 404, "Invalid media id token %r" % (request.postpath,), Codes.UNKNOWN - ) - - -def respond_404(request: SynapseRequest) -> None: - respond_with_json( - request, - 404, - cs_error("Not found %r" % (request.postpath,), code=Codes.NOT_FOUND), - send_cors=True, - ) - - -async def respond_with_file( - request: SynapseRequest, - media_type: str, - file_path: str, - file_size: Optional[int] = None, - upload_name: Optional[str] = None, -) -> None: - logger.debug("Responding with %r", file_path) - - if os.path.isfile(file_path): - if file_size is None: - stat = os.stat(file_path) - file_size = stat.st_size - - add_file_headers(request, media_type, file_size, upload_name) - - with open(file_path, "rb") as f: - await make_deferred_yieldable(FileSender().beginFileTransfer(f, request)) - - finish_request(request) - else: - respond_404(request) - - -def add_file_headers( - request: Request, - media_type: str, - file_size: Optional[int], - upload_name: Optional[str], -) -> None: - """Adds the correct response headers in preparation for responding with the - media. - - Args: - request - media_type: The media/content type. - file_size: Size in bytes of the media, if known. - upload_name: The name of the requested file, if any. - """ - - def _quote(x: str) -> str: - return urllib.parse.quote(x.encode("utf-8")) - - # Default to a UTF-8 charset for text content types. 
- # ex, uses UTF-8 for 'text/css' but not 'text/css; charset=UTF-16' - if media_type.lower() in TEXT_CONTENT_TYPES: - content_type = media_type + "; charset=UTF-8" - else: - content_type = media_type - - request.setHeader(b"Content-Type", content_type.encode("UTF-8")) - if upload_name: - # RFC6266 section 4.1 [1] defines both `filename` and `filename*`. - # - # `filename` is defined to be a `value`, which is defined by RFC2616 - # section 3.6 [2] to be a `token` or a `quoted-string`, where a `token` - # is (essentially) a single US-ASCII word, and a `quoted-string` is a - # US-ASCII string surrounded by double-quotes, using backslash as an - # escape character. Note that %-encoding is *not* permitted. - # - # `filename*` is defined to be an `ext-value`, which is defined in - # RFC5987 section 3.2.1 [3] to be `charset "'" [ language ] "'" value-chars`, - # where `value-chars` is essentially a %-encoded string in the given charset. - # - # [1]: https://tools.ietf.org/html/rfc6266#section-4.1 - # [2]: https://tools.ietf.org/html/rfc2616#section-3.6 - # [3]: https://tools.ietf.org/html/rfc5987#section-3.2.1 - - # We avoid the quoted-string version of `filename`, because (a) synapse didn't - # correctly interpret those as of 0.99.2 and (b) they are a bit of a pain and we - # may as well just do the filename* version. - if _can_encode_filename_as_token(upload_name): - disposition = "inline; filename=%s" % (upload_name,) - else: - disposition = "inline; filename*=utf-8''%s" % (_quote(upload_name),) - - request.setHeader(b"Content-Disposition", disposition.encode("ascii")) - - # cache for at least a day. - # XXX: we might want to turn this off for data we don't want to - # recommend caching as it's sensitive or private - or at least - # select private. don't bother setting Expires as all our - # clients are smart enough to be happy with Cache-Control - request.setHeader(b"Cache-Control", b"public,max-age=86400,s-maxage=86400") - if file_size is not None: - request.setHeader(b"Content-Length", b"%d" % (file_size,)) - - # Tell web crawlers to not index, archive, or follow links in media. This - # should help to prevent things in the media repo from showing up in web - # search results. - request.setHeader(b"X-Robots-Tag", "noindex, nofollow, noarchive, noimageindex") - - -# separators as defined in RFC2616. SP and HT are handled separately. -# see _can_encode_filename_as_token. -_FILENAME_SEPARATOR_CHARS = { - "(", - ")", - "<", - ">", - "@", - ",", - ";", - ":", - "\\", - '"', - "/", - "[", - "]", - "?", - "=", - "{", - "}", -} - - -def _can_encode_filename_as_token(x: str) -> bool: - for c in x: - # from RFC2616: - # - # token = 1* - # - # separators = "(" | ")" | "<" | ">" | "@" - # | "," | ";" | ":" | "\" | <"> - # | "/" | "[" | "]" | "?" | "=" - # | "{" | "}" | SP | HT - # - # CHAR = - # - # CTL = - # - if ord(c) >= 127 or ord(c) <= 32 or c in _FILENAME_SEPARATOR_CHARS: - return False - return True - - -async def respond_with_responder( - request: SynapseRequest, - responder: "Optional[Responder]", - media_type: str, - file_size: Optional[int], - upload_name: Optional[str] = None, -) -> None: - """Responds to the request with given responder. If responder is None then - returns 404. - - Args: - request - responder - media_type: The media/content type. - file_size: Size in bytes of the media. If not known it should be None - upload_name: The name of the requested file, if any. 
- """ - if not responder: - respond_404(request) - return - - # If we have a responder we *must* use it as a context manager. - with responder: - if request._disconnected: - logger.warning( - "Not sending response to request %s, already disconnected.", request - ) - return - - logger.debug("Responding to media request with responder %s", responder) - add_file_headers(request, media_type, file_size, upload_name) - try: - - await responder.write_to_consumer(request) - except Exception as e: - # The majority of the time this will be due to the client having gone - # away. Unfortunately, Twisted simply throws a generic exception at us - # in that case. - logger.warning("Failed to write to consumer: %s %s", type(e), e) - - # Unregister the producer, if it has one, so Twisted doesn't complain - if request.producer: - request.unregisterProducer() - - finish_request(request) - - -class Responder: - """Represents a response that can be streamed to the requester. - - Responder is a context manager which *must* be used, so that any resources - held can be cleaned up. - """ - - def write_to_consumer(self, consumer: IConsumer) -> Awaitable: - """Stream response into consumer - - Args: - consumer: The consumer to stream into. - - Returns: - Resolves once the response has finished being written - """ - - def __enter__(self) -> None: - pass - - def __exit__( - self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], - ) -> None: - pass - - -@attr.s(slots=True, frozen=True, auto_attribs=True) -class ThumbnailInfo: - """Details about a generated thumbnail.""" - - width: int - height: int - method: str - # Content type of thumbnail, e.g. image/png - type: str - # The size of the media file, in bytes. - length: Optional[int] = None - - -@attr.s(slots=True, frozen=True, auto_attribs=True) -class FileInfo: - """Details about a requested/uploaded file.""" - - # The server name where the media originated from, or None if local. - server_name: Optional[str] - # The local ID of the file. For local files this is the same as the media_id - file_id: str - # If the file is for the url preview cache - url_cache: bool = False - # Whether the file is a thumbnail or not. - thumbnail: Optional[ThumbnailInfo] = None - - # The below properties exist to maintain compatibility with third-party modules. - @property - def thumbnail_width(self) -> Optional[int]: - if not self.thumbnail: - return None - return self.thumbnail.width - - @property - def thumbnail_height(self) -> Optional[int]: - if not self.thumbnail: - return None - return self.thumbnail.height - - @property - def thumbnail_method(self) -> Optional[str]: - if not self.thumbnail: - return None - return self.thumbnail.method - - @property - def thumbnail_type(self) -> Optional[str]: - if not self.thumbnail: - return None - return self.thumbnail.type - - @property - def thumbnail_length(self) -> Optional[int]: - if not self.thumbnail: - return None - return self.thumbnail.length - - -def get_filename_from_headers(headers: Dict[bytes, List[bytes]]) -> Optional[str]: - """ - Get the filename of the downloaded file by inspecting the - Content-Disposition HTTP header. - - Args: - headers: The HTTP request headers. - - Returns: - The filename, or None. - """ - content_disposition = headers.get(b"Content-Disposition", [b""]) - - # No header, bail out. 
- if not content_disposition[0]: - return None - - _, params = _parse_header(content_disposition[0]) - - upload_name = None - - # First check if there is a valid UTF-8 filename - upload_name_utf8 = params.get(b"filename*", None) - if upload_name_utf8: - if upload_name_utf8.lower().startswith(b"utf-8''"): - upload_name_utf8 = upload_name_utf8[7:] - # We have a filename*= section. This MUST be ASCII, and any UTF-8 - # bytes are %-quoted. - try: - # Once it is decoded, we can then unquote the %-encoded - # parts strictly into a unicode string. - upload_name = urllib.parse.unquote( - upload_name_utf8.decode("ascii"), errors="strict" - ) - except UnicodeDecodeError: - # Incorrect UTF-8. - pass - - # If there isn't check for an ascii name. - if not upload_name: - upload_name_ascii = params.get(b"filename", None) - if upload_name_ascii and is_ascii(upload_name_ascii): - upload_name = upload_name_ascii.decode("ascii") - - # This may be None here, indicating we did not find a matching name. - return upload_name - - -def _parse_header(line: bytes) -> Tuple[bytes, Dict[bytes, bytes]]: - """Parse a Content-type like header. - - Cargo-culted from `cgi`, but works on bytes rather than strings. - - Args: - line: header to be parsed - - Returns: - The main content-type, followed by the parameter dictionary - """ - parts = _parseparam(b";" + line) - key = next(parts) - pdict = {} - for p in parts: - i = p.find(b"=") - if i >= 0: - name = p[:i].strip().lower() - value = p[i + 1 :].strip() - - # strip double-quotes - if len(value) >= 2 and value[0:1] == value[-1:] == b'"': - value = value[1:-1] - value = value.replace(b"\\\\", b"\\").replace(b'\\"', b'"') - pdict[name] = value - - return key, pdict - - -def _parseparam(s: bytes) -> Generator[bytes, None, None]: - """Generator which splits the input on ;, respecting double-quoted sequences - - Cargo-culted from `cgi`, but works on bytes rather than strings. - - Args: - s: header to be parsed - - Returns: - The split input - """ - while s[:1] == b";": - s = s[1:] - - # look for the next ; - end = s.find(b";") - - # if there is an odd number of " marks between here and the next ;, skip to the - # next ; instead - while end > 0 and (s.count(b'"', 0, end) - s.count(b'\\"', 0, end)) % 2: - end = s.find(b";", end + 1) - - if end < 0: - end = len(s) - f = s[:end] - yield f.strip() - s = s[end:] +# This exists purely for backwards compatibility with media providers and spam checkers. +from synapse.media._base import FileInfo, Responder # noqa: F401 diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py index a5c3de192f6f..11b0e8e23185 100644 --- a/synapse/rest/media/v1/media_storage.py +++ b/synapse/rest/media/v1/media_storage.py @@ -1,4 +1,4 @@ -# Copyright 2018-2021 The Matrix.org Foundation C.I.C. +# Copyright 2023 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,365 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import contextlib -import logging -import os -import shutil -from types import TracebackType -from typing import ( - IO, - TYPE_CHECKING, - Any, - Awaitable, - BinaryIO, - Callable, - Generator, - Optional, - Sequence, - Tuple, - Type, -) - -import attr - -from twisted.internet.defer import Deferred -from twisted.internet.interfaces import IConsumer -from twisted.protocols.basic import FileSender - -import synapse -from synapse.api.errors import NotFoundError -from synapse.logging.context import defer_to_thread, make_deferred_yieldable -from synapse.util import Clock -from synapse.util.file_consumer import BackgroundFileConsumer - -from ._base import FileInfo, Responder -from .filepath import MediaFilePaths - -if TYPE_CHECKING: - from synapse.server import HomeServer - - from .storage_provider import StorageProviderWrapper - -logger = logging.getLogger(__name__) - - -class MediaStorage: - """Responsible for storing/fetching files from local sources. - - Args: - hs - local_media_directory: Base path where we store media on disk - filepaths - storage_providers: List of StorageProvider that are used to fetch and store files. - """ - - def __init__( - self, - hs: "HomeServer", - local_media_directory: str, - filepaths: MediaFilePaths, - storage_providers: Sequence["StorageProviderWrapper"], - ): - self.hs = hs - self.reactor = hs.get_reactor() - self.local_media_directory = local_media_directory - self.filepaths = filepaths - self.storage_providers = storage_providers - self.spam_checker = hs.get_spam_checker() - self.clock = hs.get_clock() - - async def store_file(self, source: IO, file_info: FileInfo) -> str: - """Write `source` to the on disk media store, and also any other - configured storage providers - - Args: - source: A file like object that should be written - file_info: Info about the file to store - - Returns: - the file path written to in the primary media store - """ - - with self.store_into_file(file_info) as (f, fname, finish_cb): - # Write to the main repository - await self.write_to_file(source, f) - await finish_cb() - - return fname - - async def write_to_file(self, source: IO, output: IO) -> None: - """Asynchronously write the `source` to `output`.""" - await defer_to_thread(self.reactor, _write_file_synchronously, source, output) - - @contextlib.contextmanager - def store_into_file( - self, file_info: FileInfo - ) -> Generator[Tuple[BinaryIO, str, Callable[[], Awaitable[None]]], None, None]: - """Context manager used to get a file like object to write into, as - described by file_info. - - Actually yields a 3-tuple (file, fname, finish_cb), where file is a file - like object that can be written to, fname is the absolute path of file - on disk, and finish_cb is a function that returns an awaitable. - - fname can be used to read the contents from after upload, e.g. to - generate thumbnails. - - finish_cb must be called and waited on after the file has been - successfully been written to. Should not be called if there was an - error. - - Args: - file_info: Info about the file to store - - Example: - - with media_storage.store_into_file(info) as (f, fname, finish_cb): - # .. write into f ... - await finish_cb() - """ - - path = self._file_info_to_path(file_info) - fname = os.path.join(self.local_media_directory, path) - - dirname = os.path.dirname(fname) - os.makedirs(dirname, exist_ok=True) - - finished_called = [False] - - try: - with open(fname, "wb") as f: - - async def finish() -> None: - # Ensure that all writes have been flushed and close the - # file. 
- f.flush() - f.close() - - spam_check = await self.spam_checker.check_media_file_for_spam( - ReadableFileWrapper(self.clock, fname), file_info - ) - if spam_check != synapse.module_api.NOT_SPAM: - logger.info("Blocking media due to spam checker") - # Note that we'll delete the stored media, due to the - # try/except below. The media also won't be stored in - # the DB. - # We currently ignore any additional field returned by - # the spam-check API. - raise SpamMediaException(errcode=spam_check[0]) - - for provider in self.storage_providers: - await provider.store_file(path, file_info) - - finished_called[0] = True - - yield f, fname, finish - except Exception as e: - try: - os.remove(fname) - except Exception: - pass - - raise e from None - - if not finished_called: - raise Exception("Finished callback not called") - - async def fetch_media(self, file_info: FileInfo) -> Optional[Responder]: - """Attempts to fetch media described by file_info from the local cache - and configured storage providers. - - Args: - file_info - - Returns: - Returns a Responder if the file was found, otherwise None. - """ - paths = [self._file_info_to_path(file_info)] - - # fallback for remote thumbnails with no method in the filename - if file_info.thumbnail and file_info.server_name: - paths.append( - self.filepaths.remote_media_thumbnail_rel_legacy( - server_name=file_info.server_name, - file_id=file_info.file_id, - width=file_info.thumbnail.width, - height=file_info.thumbnail.height, - content_type=file_info.thumbnail.type, - ) - ) - - for path in paths: - local_path = os.path.join(self.local_media_directory, path) - if os.path.exists(local_path): - logger.debug("responding with local file %s", local_path) - return FileResponder(open(local_path, "rb")) - logger.debug("local file %s did not exist", local_path) - - for provider in self.storage_providers: - for path in paths: - res: Any = await provider.fetch(path, file_info) - if res: - logger.debug("Streaming %s from %s", path, provider) - return res - logger.debug("%s not found on %s", path, provider) - - return None - - async def ensure_media_is_in_local_cache(self, file_info: FileInfo) -> str: - """Ensures that the given file is in the local cache. Attempts to - download it from storage providers if it isn't. - - Args: - file_info - - Returns: - Full path to local file - """ - path = self._file_info_to_path(file_info) - local_path = os.path.join(self.local_media_directory, path) - if os.path.exists(local_path): - return local_path - - # Fallback for paths without method names - # Should be removed in the future - if file_info.thumbnail and file_info.server_name: - legacy_path = self.filepaths.remote_media_thumbnail_rel_legacy( - server_name=file_info.server_name, - file_id=file_info.file_id, - width=file_info.thumbnail.width, - height=file_info.thumbnail.height, - content_type=file_info.thumbnail.type, - ) - legacy_local_path = os.path.join(self.local_media_directory, legacy_path) - if os.path.exists(legacy_local_path): - return legacy_local_path - - dirname = os.path.dirname(local_path) - os.makedirs(dirname, exist_ok=True) - - for provider in self.storage_providers: - res: Any = await provider.fetch(path, file_info) - if res: - with res: - consumer = BackgroundFileConsumer( - open(local_path, "wb"), self.reactor - ) - await res.write_to_consumer(consumer) - await consumer.wait() - return local_path - - raise NotFoundError() - - def _file_info_to_path(self, file_info: FileInfo) -> str: - """Converts file_info into a relative path. 
- - The path is suitable for storing files under a directory, e.g. used to - store files on local FS under the base media repository directory. - """ - if file_info.url_cache: - if file_info.thumbnail: - return self.filepaths.url_cache_thumbnail_rel( - media_id=file_info.file_id, - width=file_info.thumbnail.width, - height=file_info.thumbnail.height, - content_type=file_info.thumbnail.type, - method=file_info.thumbnail.method, - ) - return self.filepaths.url_cache_filepath_rel(file_info.file_id) - - if file_info.server_name: - if file_info.thumbnail: - return self.filepaths.remote_media_thumbnail_rel( - server_name=file_info.server_name, - file_id=file_info.file_id, - width=file_info.thumbnail.width, - height=file_info.thumbnail.height, - content_type=file_info.thumbnail.type, - method=file_info.thumbnail.method, - ) - return self.filepaths.remote_media_filepath_rel( - file_info.server_name, file_info.file_id - ) - - if file_info.thumbnail: - return self.filepaths.local_media_thumbnail_rel( - media_id=file_info.file_id, - width=file_info.thumbnail.width, - height=file_info.thumbnail.height, - content_type=file_info.thumbnail.type, - method=file_info.thumbnail.method, - ) - return self.filepaths.local_media_filepath_rel(file_info.file_id) - - -def _write_file_synchronously(source: IO, dest: IO) -> None: - """Write `source` to the file like `dest` synchronously. Should be called - from a thread. - - Args: - source: A file like object that's to be written - dest: A file like object to be written to - """ - source.seek(0) # Ensure we read from the start of the file - shutil.copyfileobj(source, dest) - - -class FileResponder(Responder): - """Wraps an open file that can be sent to a request. - - Args: - open_file: A file like object to be streamed ot the client, - is closed when finished streaming. - """ - - def __init__(self, open_file: IO): - self.open_file = open_file - - def write_to_consumer(self, consumer: IConsumer) -> Deferred: - return make_deferred_yieldable( - FileSender().beginFileTransfer(self.open_file, consumer) - ) - - def __exit__( - self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], - ) -> None: - self.open_file.close() - - -class SpamMediaException(NotFoundError): - """The media was blocked by a spam checker, so we simply 404 the request (in - the same way as if it was quarantined). - """ - - -@attr.s(slots=True, auto_attribs=True) -class ReadableFileWrapper: - """Wrapper that allows reading a file in chunks, yielding to the reactor, - and writing to a callback. - - This is simplified `FileSender` that takes an IO object rather than an - `IConsumer`. - """ - - CHUNK_SIZE = 2**14 - - clock: Clock - path: str - - async def write_chunks_to(self, callback: Callable[[bytes], None]) -> None: - """Reads the file in chunks and calls the callback with each chunk.""" - - with open(self.path, "rb") as file: - while True: - chunk = file.read(self.CHUNK_SIZE) - if not chunk: - break - - callback(chunk) +# - # We yield to the reactor by sleeping for 0 seconds. - await self.clock.sleep(0) +# This exists purely for backwards compatibility with spam checkers. 
+from synapse.media.media_storage import ReadableFileWrapper # noqa: F401 diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py index 1c9b71d69c77..d7653f30aead 100644 --- a/synapse/rest/media/v1/storage_provider.py +++ b/synapse/rest/media/v1/storage_provider.py @@ -1,4 +1,4 @@ -# Copyright 2018-2021 The Matrix.org Foundation C.I.C. +# Copyright 2023 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,171 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# -import abc -import logging -import os -import shutil -from typing import TYPE_CHECKING, Callable, Optional - -from synapse.config._base import Config -from synapse.logging.context import defer_to_thread, run_in_background -from synapse.util.async_helpers import maybe_awaitable - -from ._base import FileInfo, Responder -from .media_storage import FileResponder - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from synapse.server import HomeServer - - -class StorageProvider(metaclass=abc.ABCMeta): - """A storage provider is a service that can store uploaded media and - retrieve them. - """ - - @abc.abstractmethod - async def store_file(self, path: str, file_info: FileInfo) -> None: - """Store the file described by file_info. The actual contents can be - retrieved by reading the file in file_info.upload_path. - - Args: - path: Relative path of file in local cache - file_info: The metadata of the file. - """ - - @abc.abstractmethod - async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]: - """Attempt to fetch the file described by file_info and stream it - into writer. - - Args: - path: Relative path of file in local cache - file_info: The metadata of the file. - - Returns: - Returns a Responder if the provider has the file, otherwise returns None. - """ - - -class StorageProviderWrapper(StorageProvider): - """Wraps a storage provider and provides various config options - - Args: - backend: The storage provider to wrap. - store_local: Whether to store new local files or not. - store_synchronous: Whether to wait for file to be successfully - uploaded, or todo the upload in the background. - store_remote: Whether remote media should be uploaded - """ - - def __init__( - self, - backend: StorageProvider, - store_local: bool, - store_synchronous: bool, - store_remote: bool, - ): - self.backend = backend - self.store_local = store_local - self.store_synchronous = store_synchronous - self.store_remote = store_remote - - def __str__(self) -> str: - return "StorageProviderWrapper[%s]" % (self.backend,) - - async def store_file(self, path: str, file_info: FileInfo) -> None: - if not file_info.server_name and not self.store_local: - return None - - if file_info.server_name and not self.store_remote: - return None - - if file_info.url_cache: - # The URL preview cache is short lived and not worth offloading or - # backing up. - return None - - if self.store_synchronous: - # store_file is supposed to return an Awaitable, but guard - # against improper implementations. - await maybe_awaitable(self.backend.store_file(path, file_info)) # type: ignore - else: - # TODO: Handle errors. 
- async def store() -> None: - try: - return await maybe_awaitable( - self.backend.store_file(path, file_info) - ) - except Exception: - logger.exception("Error storing file") - - run_in_background(store) - - async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]: - if file_info.url_cache: - # Files in the URL preview cache definitely aren't stored here, - # so avoid any potentially slow I/O or network access. - return None - - # store_file is supposed to return an Awaitable, but guard - # against improper implementations. - return await maybe_awaitable(self.backend.fetch(path, file_info)) - - -class FileStorageProviderBackend(StorageProvider): - """A storage provider that stores files in a directory on a filesystem. - - Args: - hs - config: The config returned by `parse_config`. - """ - - def __init__(self, hs: "HomeServer", config: str): - self.hs = hs - self.cache_directory = hs.config.media.media_store_path - self.base_directory = config - - def __str__(self) -> str: - return "FileStorageProviderBackend[%s]" % (self.base_directory,) - - async def store_file(self, path: str, file_info: FileInfo) -> None: - """See StorageProvider.store_file""" - - primary_fname = os.path.join(self.cache_directory, path) - backup_fname = os.path.join(self.base_directory, path) - - dirname = os.path.dirname(backup_fname) - os.makedirs(dirname, exist_ok=True) - - # mypy needs help inferring the type of the second parameter, which is generic - shutil_copyfile: Callable[[str, str], str] = shutil.copyfile - await defer_to_thread( - self.hs.get_reactor(), - shutil_copyfile, - primary_fname, - backup_fname, - ) - - async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]: - """See StorageProvider.fetch""" - - backup_fname = os.path.join(self.base_directory, path) - if os.path.isfile(backup_fname): - return FileResponder(open(backup_fname, "rb")) - - return None - - @staticmethod - def parse_config(config: dict) -> str: - """Called on startup to parse config supplied. This should parse - the config and raise if there is a problem. - - The returned value is passed into the constructor. - - In this case we only care about a single param, the directory, so let's - just pull that out. - """ - return Config.ensure_directory(config["directory"]) +# This exists purely for backwards compatibility with media providers. 
+from synapse.media.storage_provider import StorageProvider # noqa: F401 diff --git a/synapse/server.py b/synapse/server.py index 3cfb1b173b50..469966a0d749 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -21,7 +21,9 @@ import abc import functools import logging -from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, TypeVar, cast +from typing import TYPE_CHECKING, Callable, Dict, List, Optional, TypeVar, cast + +from typing_extensions import TypeAlias from twisted.internet.interfaces import IOpenSSLContextFactory from twisted.internet.tcp import Port @@ -105,8 +107,10 @@ from synapse.handlers.user_directory import UserDirectoryHandler from synapse.http.client import InsecureInterceptableContextFactory, SimpleHttpClient from synapse.http.matrixfederationclient import MatrixFederationHttpClient +from synapse.media.media_repository import MediaRepository from synapse.metrics.common_usage_metrics import CommonUsageMetricsManager from synapse.module_api import ModuleApi +from synapse.module_api.callbacks import ModuleApiCallbacks from synapse.notifier import Notifier, ReplicationNotifier from synapse.push.bulk_push_rule_evaluator import BulkPushRuleEvaluator from synapse.push.pusherpool import PusherPool @@ -116,10 +120,7 @@ from synapse.replication.tcp.handler import ReplicationCommandHandler from synapse.replication.tcp.resource import ReplicationStreamer from synapse.replication.tcp.streams import STREAMS_MAP, Stream -from synapse.rest.media.v1.media_repository import ( - MediaRepository, - MediaRepositoryResource, -) +from synapse.rest.media.media_repository_resource import MediaRepositoryResource from synapse.server_notices.server_notices_manager import ServerNoticesManager from synapse.server_notices.server_notices_sender import ServerNoticesSender from synapse.server_notices.worker_server_notices_sender import ( @@ -145,10 +146,31 @@ from synapse.handlers.saml import SamlHandler -T = TypeVar("T", bound=Callable[..., Any]) +# The annotation for `cache_in_self` used to be +# def (builder: Callable[["HomeServer"],T]) -> Callable[["HomeServer"],T] +# which mypy was happy with. +# +# But PyCharm was confused by this. If `foo` was decorated by `@cache_in_self`, then +# an expression like `hs.foo()` +# +# - would erroneously warn that we hadn't provided a `hs` argument to foo (PyCharm +# confused about boundmethods and unbound methods?), and +# - would be considered to have type `Any`, making for a poor autocomplete and +# cross-referencing experience. +# +# Instead, use a typevar `F` to express that `@cache_in_self` returns exactly the +# same type it receives. This isn't strictly true [*], but it's more than good +# enough to keep PyCharm and mypy happy. +# +# [*]: (e.g. `builder` could be an object with a __call__ attribute rather than a +# types.FunctionType instance, whereas the return value is always a +# types.FunctionType instance.) + +T: TypeAlias = object +F = TypeVar("F", bound=Callable[["HomeServer"], T]) -def cache_in_self(builder: T) -> T: +def cache_in_self(builder: F) -> F: """Wraps a function called e.g. `get_foo`, checking if `self.foo` exists and returning if so. If not, calls the given function and sets `self.foo` to it. 
@@ -167,7 +189,7 @@ def cache_in_self(builder: T) -> T: building = [False] @functools.wraps(builder) - def _get(self): + def _get(self: "HomeServer") -> T: try: return getattr(self, depname) except AttributeError: @@ -186,9 +208,7 @@ def _get(self): return dep - # We cast here as we need to tell mypy that `_get` has the same signature as - # `builder`. - return cast(T, _get) + return cast(F, _get) class HomeServer(metaclass=abc.ABCMeta): @@ -782,6 +802,10 @@ def get_federation_ratelimiter(self) -> FederationRateLimiter: def get_module_api(self) -> ModuleApi: return ModuleApi(self, self.get_auth_handler()) + @cache_in_self + def get_module_api_callbacks(self) -> ModuleApiCallbacks: + return ModuleApiCallbacks() + @cache_in_self def get_account_data_handler(self) -> AccountDataHandler: return AccountDataHandler(self) @@ -834,6 +858,7 @@ def get_outbound_redis_connection(self) -> "ConnectionHandler": hs=self, host=self.config.redis.redis_host, port=self.config.redis.redis_port, + dbid=self.config.redis.redis_dbid, password=self.config.redis.redis_password, reconnect=True, ) diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py index 564e3705c253..9732dbdb6e66 100644 --- a/synapse/server_notices/server_notices_manager.py +++ b/synapse/server_notices/server_notices_manager.py @@ -178,7 +178,7 @@ async def get_or_create_notice_room_for_user(self, user_id: str) -> str: "avatar_url": self._config.servernotices.server_notices_mxid_avatar_url, } - info, _ = await self._room_creation_handler.create_room( + room_id, _, _ = await self._room_creation_handler.create_room( requester, config={ "preset": RoomCreationPreset.PRIVATE_CHAT, @@ -188,7 +188,6 @@ async def get_or_create_notice_room_for_user(self, user_id: str) -> str: ratelimit=False, creator_join_profile=join_profile, ) - room_id = info["room_id"] self.maybe_get_notice_room_for_user.invalidate((user_id,)) diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index fdfb46ab82ad..4dc25df67e17 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -39,7 +39,11 @@ from synapse.api.constants import EventTypes from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, StateResolutionVersions from synapse.events import EventBase -from synapse.events.snapshot import EventContext +from synapse.events.snapshot import ( + EventContext, + UnpersistedEventContext, + UnpersistedEventContextBase, +) from synapse.logging.context import ContextResourceUsage from synapse.replication.http.state import ReplicationUpdateCurrentStateRestServlet from synapse.state import v1, v2 @@ -222,7 +226,7 @@ async def compute_state_after_events( return await ret.get_state(self._state_storage_controller, state_filter) async def get_current_user_ids_in_room( - self, room_id: str, latest_event_ids: List[str] + self, room_id: str, latest_event_ids: Collection[str] ) -> Set[str]: """ Get the users IDs who are currently in a room. @@ -262,31 +266,31 @@ async def get_hosts_in_room_at_events( state = await entry.get_state(self._state_storage_controller, StateFilter.all()) return await self.store.get_joined_hosts(room_id, state, entry) - async def compute_event_context( + async def calculate_context_info( self, event: EventBase, state_ids_before_event: Optional[StateMap[str]] = None, partial_state: Optional[bool] = None, - ) -> EventContext: - """Build an EventContext structure for a non-outlier event. 
- - (for an outlier, call EventContext.for_outlier directly) - - This works out what the current state should be for the event, and - generates a new state group if necessary. - - Args: - event: - state_ids_before_event: The event ids of the state before the event if - it can't be calculated from existing events. This is normally - only specified when receiving an event from federation where we - don't have the prev events, e.g. when backfilling. - partial_state: - `True` if `state_ids_before_event` is partial and omits non-critical - membership events. - `False` if `state_ids_before_event` is the full state. - `None` when `state_ids_before_event` is not provided. In this case, the - flag will be calculated based on `event`'s prev events. + state_group_before_event: Optional[int] = None, + ) -> UnpersistedEventContextBase: + """ + Calulates the contents of an unpersisted event context, other than the current + state group (which is either provided or calculated when the event context is persisted) + + state_ids_before_event: + The event ids of the full state before the event if + it can't be calculated from existing events. This is normally + only specified when receiving an event from federation where we + don't have the prev events, e.g. when backfilling or when the event + is being created for batch persisting. + partial_state: + `True` if `state_ids_before_event` is partial and omits non-critical + membership events. + `False` if `state_ids_before_event` is the full state. + `None` when `state_ids_before_event` is not provided. In this case, the + flag will be calculated based on `event`'s prev events. + state_group_before_event: + the current state group at the time of event, if known Returns: The event context. @@ -294,7 +298,6 @@ async def compute_event_context( RuntimeError if `state_ids_before_event` is not provided and one or more prev events are missing or outliers. """ - assert not event.internal_metadata.is_outlier() # @@ -306,17 +309,6 @@ async def compute_event_context( state_group_before_event_prev_group = None deltas_to_state_group_before_event = None - # .. though we need to get a state group for it. - state_group_before_event = ( - await self._state_storage_controller.store_state_group( - event.event_id, - event.room_id, - prev_group=None, - delta_ids=None, - current_state_ids=state_ids_before_event, - ) - ) - # the partial_state flag must be provided assert partial_state is not None else: @@ -345,6 +337,7 @@ async def compute_event_context( logger.debug("calling resolve_state_groups from compute_event_context") # we've already taken into account partial state, so no need to wait for # complete state here. 
+ entry = await self.resolve_state_groups_for_events( event.room_id, event.prev_event_ids(), @@ -383,18 +376,19 @@ async def compute_event_context( # if not event.is_state(): - return EventContext.with_state( + return UnpersistedEventContext( storage=self._storage_controllers, state_group_before_event=state_group_before_event, - state_group=state_group_before_event, + state_group_after_event=state_group_before_event, state_delta_due_to_event={}, - prev_group=state_group_before_event_prev_group, - delta_ids=deltas_to_state_group_before_event, + prev_group_for_state_group_before_event=state_group_before_event_prev_group, + delta_ids_to_state_group_before_event=deltas_to_state_group_before_event, partial_state=partial_state, + state_map_before_event=state_ids_before_event, ) # - # otherwise, we'll need to create a new state group for after the event + # otherwise, we'll need to set up creating a new state group for after the event # key = (event.type, event.state_key) @@ -412,88 +406,60 @@ async def compute_event_context( delta_ids = {key: event.event_id} - state_group_after_event = ( - await self._state_storage_controller.store_state_group( - event.event_id, - event.room_id, - prev_group=state_group_before_event, - delta_ids=delta_ids, - current_state_ids=None, - ) - ) - - return EventContext.with_state( + return UnpersistedEventContext( storage=self._storage_controllers, - state_group=state_group_after_event, state_group_before_event=state_group_before_event, + state_group_after_event=None, state_delta_due_to_event=delta_ids, - prev_group=state_group_before_event, - delta_ids=delta_ids, + prev_group_for_state_group_before_event=state_group_before_event_prev_group, + delta_ids_to_state_group_before_event=deltas_to_state_group_before_event, partial_state=partial_state, + state_map_before_event=state_ids_before_event, ) - async def compute_event_context_for_batched( + async def compute_event_context( self, event: EventBase, - state_ids_before_event: StateMap[str], - current_state_group: int, + state_ids_before_event: Optional[StateMap[str]] = None, + partial_state: Optional[bool] = None, ) -> EventContext: - """ - Generate an event context for an event that has not yet been persisted to the - database. Intended for use with events that are created to be persisted in a batch. - Args: - event: the event the context is being computed for - state_ids_before_event: a state map consisting of the state ids of the events - created prior to this event. - current_state_group: the current state group before the event. - """ - state_group_before_event_prev_group = None - deltas_to_state_group_before_event = None - - state_group_before_event = current_state_group - - # if the event is not state, we are set - if not event.is_state(): - return EventContext.with_state( - storage=self._storage_controllers, - state_group_before_event=state_group_before_event, - state_group=state_group_before_event, - state_delta_due_to_event={}, - prev_group=state_group_before_event_prev_group, - delta_ids=deltas_to_state_group_before_event, - partial_state=False, - ) + """Build an EventContext structure for a non-outlier event. - # otherwise, we'll need to create a new state group for after the event - key = (event.type, event.state_key) + (for an outlier, call EventContext.for_outlier directly) - if state_ids_before_event is not None: - replaces = state_ids_before_event.get(key) + This works out what the current state should be for the event, and + generates a new state group if necessary. 
- if replaces and replaces != event.event_id: - event.unsigned["replaces_state"] = replaces + Args: + event: + state_ids_before_event: The event ids of the state before the event if + it can't be calculated from existing events. This is normally + only specified when receiving an event from federation where we + don't have the prev events, e.g. when backfilling. + partial_state: + `True` if `state_ids_before_event` is partial and omits non-critical + membership events. + `False` if `state_ids_before_event` is the full state. + `None` when `state_ids_before_event` is not provided. In this case, the + flag will be calculated based on `event`'s prev events. + entry: + A state cache entry for the resolved state across the prev events. We may + have already calculated this, so if it's available pass it in + Returns: + The event context. - delta_ids = {key: event.event_id} + Raises: + RuntimeError if `state_ids_before_event` is not provided and one or more + prev events are missing or outliers. + """ - state_group_after_event = ( - await self._state_storage_controller.store_state_group( - event.event_id, - event.room_id, - prev_group=state_group_before_event, - delta_ids=delta_ids, - current_state_ids=None, - ) + unpersisted_context = await self.calculate_context_info( + event=event, + state_ids_before_event=state_ids_before_event, + partial_state=partial_state, ) - return EventContext.with_state( - storage=self._storage_controllers, - state_group=state_group_after_event, - state_group_before_event=state_group_before_event, - state_delta_due_to_event=delta_ids, - prev_group=state_group_before_event, - delta_ids=delta_ids, - partial_state=False, - ) + return await unpersisted_context.persist(event) @measure_func() async def resolve_state_groups_for_events( diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index e8092b85f137..1ba4866b130b 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -37,6 +37,8 @@ class SQLBaseStore(metaclass=ABCMeta): per data store (and not one per physical database). """ + db_pool: DatabasePool + def __init__( self, database: DatabasePool, diff --git a/synapse/storage/controllers/purge_events.py b/synapse/storage/controllers/purge_events.py index 9ca50d6a0982..c599397b86ff 100644 --- a/synapse/storage/controllers/purge_events.py +++ b/synapse/storage/controllers/purge_events.py @@ -16,6 +16,7 @@ import logging from typing import TYPE_CHECKING, Set +from synapse.logging.context import nested_logging_context from synapse.storage.databases import Databases if TYPE_CHECKING: @@ -33,8 +34,9 @@ def __init__(self, hs: "HomeServer", stores: Databases): async def purge_room(self, room_id: str) -> None: """Deletes all record of a room""" - state_groups_to_delete = await self.stores.main.purge_room(room_id) - await self.stores.state.purge_room_state(room_id, state_groups_to_delete) + with nested_logging_context(room_id): + state_groups_to_delete = await self.stores.main.purge_room(room_id) + await self.stores.state.purge_room_state(room_id, state_groups_to_delete) async def purge_history( self, room_id: str, token: str, delete_local_events: bool @@ -51,15 +53,17 @@ async def purge_history( (instead of just marking them as outliers and deleting their state groups). 
""" - state_groups = await self.stores.main.purge_history( - room_id, token, delete_local_events - ) - - logger.info("[purge] finding state groups that can be deleted") + with nested_logging_context(room_id): + state_groups = await self.stores.main.purge_history( + room_id, token, delete_local_events + ) - sg_to_delete = await self._find_unreferenced_groups(state_groups) + logger.info("[purge] finding state groups that can be deleted") + sg_to_delete = await self._find_unreferenced_groups(state_groups) - await self.stores.state.purge_unreferenced_state_groups(room_id, sg_to_delete) + await self.stores.state.purge_unreferenced_state_groups( + room_id, sg_to_delete + ) async def _find_unreferenced_groups(self, state_groups: Set[int]) -> Set[int]: """Used when purging history to figure out which state groups can be diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 52efd4a1719b..9d7a8a792f71 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -14,6 +14,7 @@ import logging from typing import ( TYPE_CHECKING, + AbstractSet, Any, Awaitable, Callable, @@ -23,7 +24,6 @@ List, Mapping, Optional, - Set, Tuple, ) @@ -527,7 +527,7 @@ async def get_current_state_event( ) return state_map.get(key) - async def get_current_hosts_in_room(self, room_id: str) -> Set[str]: + async def get_current_hosts_in_room(self, room_id: str) -> AbstractSet[str]: """Get current hosts in room based on current state. Blocks until we have full state for the given room. This only happens for rooms @@ -584,7 +584,7 @@ async def get_current_hosts_in_room_or_partial_state_approximation( async def get_users_in_room_with_profiles( self, room_id: str - ) -> Dict[str, ProfileInfo]: + ) -> Mapping[str, ProfileInfo]: """ Get the current users in the room with their profiles. If the room is currently partial-stated, this will block until the room has diff --git a/synapse/storage/database.py b/synapse/storage/database.py index e20c5c5302fa..fec4ae5b970d 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -34,6 +34,7 @@ Tuple, Type, TypeVar, + Union, cast, overload, ) @@ -100,6 +101,15 @@ } +class _PoolConnection(Connection): + """ + A Connection from twisted.enterprise.adbapi.Connection. + """ + + def reconnect(self) -> None: + ... + + def make_pool( reactor: IReactorCore, db_config: DatabaseConnectionConfig, @@ -499,6 +509,7 @@ class DatabasePool: """ _TXN_ID = 0 + engine: BaseDatabaseEngine def __init__( self, @@ -671,7 +682,15 @@ def new_transaction( f = cast(types.FunctionType, func) # type: ignore[redundant-cast] if f.__closure__: for i, cell in enumerate(f.__closure__): - if inspect.isgenerator(cell.cell_contents): + try: + contents = cell.cell_contents + except ValueError: + # cell.cell_contents can raise if the "cell" is empty, + # which indicates that the variable is currently + # unbound. + continue + + if inspect.isgenerator(contents): logger.error( "Programming error: function %s references generator %s " "via its closure", @@ -847,7 +866,8 @@ async def _runInteraction() -> R: try: with opentracing.start_active_span(f"db.{desc}"): result = await self.runWithConnection( - self.new_transaction, + # mypy seems to have an issue with this, maybe a bug? 
+ self.new_transaction, # type: ignore[arg-type] desc, after_callbacks, async_after_callbacks, @@ -883,7 +903,7 @@ async def _runInteraction() -> R: async def runWithConnection( self, - func: Callable[..., R], + func: Callable[Concatenate[LoggingDatabaseConnection, P], R], *args: Any, db_autocommit: bool = False, isolation_level: Optional[int] = None, @@ -917,7 +937,7 @@ async def runWithConnection( start_time = monotonic_time() - def inner_func(conn, *args, **kwargs): + def inner_func(conn: _PoolConnection, *args: P.args, **kwargs: P.kwargs) -> R: # We shouldn't be in a transaction. If we are then something # somewhere hasn't committed after doing work. (This is likely only # possible during startup, as `run*` will ensure changes are @@ -1010,7 +1030,7 @@ async def execute( decoder: Optional[Callable[[Cursor], R]], query: str, *args: Any, - ) -> R: + ) -> Union[List[Tuple[Any, ...]], R]: """Runs a single query for a result set. Args: @@ -1023,7 +1043,7 @@ async def execute( The result of decoder(results) """ - def interaction(txn): + def interaction(txn: LoggingTransaction) -> Union[List[Tuple[Any, ...]], R]: txn.execute(query, args) if decoder: return decoder(txn) diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index b421a57ba839..216d25da8d78 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -44,7 +44,7 @@ from .event_push_actions import EventPushActionsStore from .events_bg_updates import EventsBackgroundUpdatesStore from .events_forward_extremities import EventForwardExtremitiesStore -from .filtering import FilteringStore +from .filtering import FilteringWorkerStore from .keys import KeyStore from .lock import LockStore from .media_repository import MediaRepositoryStore @@ -100,7 +100,7 @@ class DataStore( EventFederationStore, MediaRepositoryStore, RejectionsStore, - FilteringStore, + FilteringWorkerStore, PusherStore, PushRuleStore, ApplicationServiceTransactionStore, diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 939a08aad162..ea2af798cc6a 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -22,6 +22,7 @@ FrozenSet, Iterable, List, + Mapping, Optional, Tuple, cast, @@ -40,7 +41,6 @@ from synapse.storage.engines import PostgresEngine from synapse.storage.util.id_generators import ( AbstractStreamIdGenerator, - AbstractStreamIdTracker, MultiWriterIdGenerator, StreamIdGenerator, ) @@ -68,14 +68,12 @@ def __init__( ): super().__init__(database, db_conn, hs) - # `_can_write_to_account_data` indicates whether the current worker is allowed - # to write account data. A value of `True` implies that `_account_data_id_gen` - # is an `AbstractStreamIdGenerator` and not just a tracker. 
- self._account_data_id_gen: AbstractStreamIdTracker self._can_write_to_account_data = ( self._instance_name in hs.config.worker.writers.account_data ) + self._account_data_id_gen: AbstractStreamIdGenerator + if isinstance(database.engine, PostgresEngine): self._account_data_id_gen = MultiWriterIdGenerator( db_conn=db_conn, @@ -127,25 +125,25 @@ def get_max_account_data_stream_id(self) -> int: return self._account_data_id_gen.get_current_token() @cached() - async def get_account_data_for_user( + async def get_global_account_data_for_user( self, user_id: str - ) -> Tuple[Dict[str, JsonDict], Dict[str, Dict[str, JsonDict]]]: + ) -> Mapping[str, JsonDict]: """ - Get all the client account_data for a user. + Get all the global client account_data for a user. If experimental MSC3391 support is enabled, any entries with an empty content body are excluded; as this means they have been deleted. Args: user_id: The user to get the account_data for. + Returns: - A 2-tuple of a dict of global account_data and a dict mapping from - room_id string to per room account_data dicts. + The global account_data. """ - def get_account_data_for_user_txn( + def get_global_account_data_for_user( txn: LoggingTransaction, - ) -> Tuple[Dict[str, JsonDict], Dict[str, Dict[str, JsonDict]]]: + ) -> Dict[str, JsonDict]: # The 'content != '{}' condition below prevents us from using # `simple_select_list_txn` here, as it doesn't support conditions # other than 'equals'. @@ -163,10 +161,34 @@ def get_account_data_for_user_txn( txn.execute(sql, (user_id,)) rows = self.db_pool.cursor_to_dict(txn) - global_account_data = { + return { row["account_data_type"]: db_to_json(row["content"]) for row in rows } + return await self.db_pool.runInteraction( + "get_global_account_data_for_user", get_global_account_data_for_user + ) + + @cached() + async def get_room_account_data_for_user( + self, user_id: str + ) -> Mapping[str, Mapping[str, JsonDict]]: + """ + Get all of the per-room client account_data for a user. + + If experimental MSC3391 support is enabled, any entries with an empty + content body are excluded; as this means they have been deleted. + + Args: + user_id: The user to get the account_data for. + + Returns: + A dict mapping from room_id string to per-room account_data dicts. + """ + + def get_room_account_data_for_user_txn( + txn: LoggingTransaction, + ) -> Dict[str, Dict[str, JsonDict]]: # The 'content != '{}' condition below prevents us from using # `simple_select_list_txn` here, as it doesn't support conditions # other than 'equals'. @@ -190,10 +212,10 @@ def get_account_data_for_user_txn( room_data[row["account_data_type"]] = db_to_json(row["content"]) - return global_account_data, by_room + return by_room return await self.db_pool.runInteraction( - "get_account_data_for_user", get_account_data_for_user_txn + "get_room_account_data_for_user_txn", get_room_account_data_for_user_txn ) @cached(num_args=2, max_entries=5000, tree=True) @@ -217,10 +239,41 @@ async def get_global_account_data_by_type_for_user( else: return None + async def get_latest_stream_id_for_global_account_data_by_type_for_user( + self, user_id: str, data_type: str + ) -> Optional[int]: + """ + Returns: + The stream ID of the account data, + or None if there is no such account data. + """ + + def get_latest_stream_id_for_global_account_data_by_type_for_user_txn( + txn: LoggingTransaction, + ) -> Optional[int]: + sql = """ + SELECT stream_id FROM account_data + WHERE user_id = ? AND account_data_type = ? 
+ ORDER BY stream_id DESC + LIMIT 1 + """ + txn.execute(sql, (user_id, data_type)) + + row = txn.fetchone() + if row: + return row[0] + else: + return None + + return await self.db_pool.runInteraction( + "get_latest_stream_id_for_global_account_data_by_type_for_user", + get_latest_stream_id_for_global_account_data_by_type_for_user_txn, + ) + @cached(num_args=2, tree=True) async def get_account_data_for_room( self, user_id: str, room_id: str - ) -> Dict[str, JsonDict]: + ) -> Mapping[str, JsonDict]: """Get all the client account_data for a user for a room. Args: @@ -347,36 +400,61 @@ def get_updated_room_account_data_txn( "get_updated_room_account_data", get_updated_room_account_data_txn ) - async def get_updated_account_data_for_user( + async def get_updated_global_account_data_for_user( self, user_id: str, stream_id: int - ) -> Tuple[Dict[str, JsonDict], Dict[str, Dict[str, JsonDict]]]: - """Get all the client account_data for a that's changed for a user + ) -> Dict[str, JsonDict]: + """Get all the global account_data that's changed for a user. Args: user_id: The user to get the account_data for. stream_id: The point in the stream since which to get updates + Returns: - A deferred pair of a dict of global account_data and a dict - mapping from room_id string to per room account_data dicts. + A dict of global account_data. """ - def get_updated_account_data_for_user_txn( + def get_updated_global_account_data_for_user( txn: LoggingTransaction, - ) -> Tuple[Dict[str, JsonDict], Dict[str, Dict[str, JsonDict]]]: - sql = ( - "SELECT account_data_type, content FROM account_data" - " WHERE user_id = ? AND stream_id > ?" - ) - + ) -> Dict[str, JsonDict]: + sql = """ + SELECT account_data_type, content FROM account_data + WHERE user_id = ? AND stream_id > ? + """ txn.execute(sql, (user_id, stream_id)) - global_account_data = {row[0]: db_to_json(row[1]) for row in txn} + return {row[0]: db_to_json(row[1]) for row in txn} - sql = ( - "SELECT room_id, account_data_type, content FROM room_account_data" - " WHERE user_id = ? AND stream_id > ?" - ) + changed = self._account_data_stream_cache.has_entity_changed( + user_id, int(stream_id) + ) + if not changed: + return {} + + return await self.db_pool.runInteraction( + "get_updated_global_account_data_for_user", + get_updated_global_account_data_for_user, + ) + + async def get_updated_room_account_data_for_user( + self, user_id: str, stream_id: int + ) -> Dict[str, Dict[str, JsonDict]]: + """Get all the room account_data that's changed for a user. + Args: + user_id: The user to get the account_data for. + stream_id: The point in the stream since which to get updates + + Returns: + A dict mapping from room_id string to per room account_data dicts. + """ + + def get_updated_room_account_data_for_user_txn( + txn: LoggingTransaction, + ) -> Dict[str, Dict[str, JsonDict]]: + sql = """ + SELECT room_id, account_data_type, content FROM room_account_data + WHERE user_id = ? AND stream_id > ? 
+ """ txn.execute(sql, (user_id, stream_id)) account_data_by_room: Dict[str, Dict[str, JsonDict]] = {} @@ -384,16 +462,17 @@ def get_updated_account_data_for_user_txn( room_account_data = account_data_by_room.setdefault(row[0], {}) room_account_data[row[1]] = db_to_json(row[2]) - return global_account_data, account_data_by_room + return account_data_by_room changed = self._account_data_stream_cache.has_entity_changed( user_id, int(stream_id) ) if not changed: - return {}, {} + return {} return await self.db_pool.runInteraction( - "get_updated_account_data_for_user", get_updated_account_data_for_user_txn + "get_updated_room_account_data_for_user", + get_updated_room_account_data_for_user_txn, ) @cached(max_entries=5000, iterable=True) @@ -449,7 +528,8 @@ def process_replication_rows( self.get_global_account_data_by_type_for_user.invalidate( (row.user_id, row.data_type) ) - self.get_account_data_for_user.invalidate((row.user_id,)) + self.get_global_account_data_for_user.invalidate((row.user_id,)) + self.get_room_account_data_for_user.invalidate((row.user_id,)) self.get_account_data_for_room.invalidate((row.user_id, row.room_id)) self.get_account_data_for_room_and_type.invalidate( (row.user_id, row.room_id, row.data_type) @@ -480,7 +560,6 @@ async def add_account_data_to_room( The maximum stream ID. """ assert self._can_write_to_account_data - assert isinstance(self._account_data_id_gen, AbstractStreamIdGenerator) content_json = json_encoder.encode(content) @@ -497,7 +576,7 @@ async def add_account_data_to_room( ) self._account_data_stream_cache.entity_has_changed(user_id, next_id) - self.get_account_data_for_user.invalidate((user_id,)) + self.get_room_account_data_for_user.invalidate((user_id,)) self.get_account_data_for_room.invalidate((user_id, room_id)) self.get_account_data_for_room_and_type.prefill( (user_id, room_id, account_data_type), content @@ -507,7 +586,7 @@ async def add_account_data_to_room( async def remove_account_data_for_room( self, user_id: str, room_id: str, account_data_type: str - ) -> Optional[int]: + ) -> int: """Delete the room account data for the user of a given type. Args: @@ -520,7 +599,6 @@ async def remove_account_data_for_room( data to delete. """ assert self._can_write_to_account_data - assert isinstance(self._account_data_id_gen, AbstractStreamIdGenerator) def _remove_account_data_for_room_txn( txn: LoggingTransaction, next_id: int @@ -559,15 +637,13 @@ def _remove_account_data_for_room_txn( next_id, ) - if not row_updated: - return None - - self._account_data_stream_cache.entity_has_changed(user_id, next_id) - self.get_account_data_for_user.invalidate((user_id,)) - self.get_account_data_for_room.invalidate((user_id, room_id)) - self.get_account_data_for_room_and_type.prefill( - (user_id, room_id, account_data_type), {} - ) + if row_updated: + self._account_data_stream_cache.entity_has_changed(user_id, next_id) + self.get_room_account_data_for_user.invalidate((user_id,)) + self.get_account_data_for_room.invalidate((user_id, room_id)) + self.get_account_data_for_room_and_type.prefill( + (user_id, room_id, account_data_type), {} + ) return self._account_data_id_gen.get_current_token() @@ -585,7 +661,6 @@ async def add_account_data_for_user( The maximum stream ID. 
""" assert self._can_write_to_account_data - assert isinstance(self._account_data_id_gen, AbstractStreamIdGenerator) async with self._account_data_id_gen.get_next() as next_id: await self.db_pool.runInteraction( @@ -598,7 +673,7 @@ async def add_account_data_for_user( ) self._account_data_stream_cache.entity_has_changed(user_id, next_id) - self.get_account_data_for_user.invalidate((user_id,)) + self.get_global_account_data_for_user.invalidate((user_id,)) self.get_global_account_data_by_type_for_user.invalidate( (user_id, account_data_type) ) @@ -690,7 +765,7 @@ async def remove_account_data_for_user( self, user_id: str, account_data_type: str, - ) -> Optional[int]: + ) -> int: """ Delete a single piece of user account data by type. @@ -707,7 +782,6 @@ async def remove_account_data_for_user( to delete. """ assert self._can_write_to_account_data - assert isinstance(self._account_data_id_gen, AbstractStreamIdGenerator) def _remove_account_data_for_user_txn( txn: LoggingTransaction, next_id: int @@ -777,14 +851,12 @@ def _remove_account_data_for_user_txn( next_id, ) - if not row_updated: - return None - - self._account_data_stream_cache.entity_has_changed(user_id, next_id) - self.get_account_data_for_user.invalidate((user_id,)) - self.get_global_account_data_by_type_for_user.prefill( - (user_id, account_data_type), {} - ) + if row_updated: + self._account_data_stream_cache.entity_has_changed(user_id, next_id) + self.get_global_account_data_for_user.invalidate((user_id,)) + self.get_global_account_data_by_type_for_user.prefill( + (user_id, account_data_type), {} + ) return self._account_data_id_gen.get_current_token() @@ -842,7 +914,10 @@ def _purge_account_data_for_user_txn( txn, self.get_account_data_for_room_and_type, (user_id,) ) self._invalidate_cache_and_stream( - txn, self.get_account_data_for_user, (user_id,) + txn, self.get_global_account_data_for_user, (user_id,) + ) + self._invalidate_cache_and_stream( + txn, self.get_room_account_data_for_user, (user_id,) ) self._invalidate_cache_and_stream( txn, self.get_global_account_data_by_type_for_user, (user_id,) diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py index 2cddd3b94529..1c052ebc5841 100644 --- a/synapse/storage/databases/main/appservice.py +++ b/synapse/storage/databases/main/appservice.py @@ -167,7 +167,7 @@ async def get_app_service_users_in_room( room_id: str, app_service: "ApplicationService", cache_context: _CacheContext, - ) -> List[str]: + ) -> Sequence[str]: """ Get all users in a room that the appservice controls. 
diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index 14d5aa4f941c..7cb14f271fe5 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -271,9 +271,6 @@ def _invalidate_caches_for_event( if relates_to: self._attempt_to_invalidate_cache("get_relations_for_event", (relates_to,)) self._attempt_to_invalidate_cache("get_references_for_event", (relates_to,)) - self._attempt_to_invalidate_cache( - "get_aggregation_groups_for_event", (relates_to,) - ) self._attempt_to_invalidate_cache("get_applicable_edit", (relates_to,)) self._attempt_to_invalidate_cache("get_thread_summary", (relates_to,)) self._attempt_to_invalidate_cache("get_thread_participated", (relates_to,)) diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 193dba320877..54a120d5ac43 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -726,8 +726,8 @@ def add_messages_txn( ], ) - for (user_id, messages_by_device) in edu["messages"].items(): - for (device_id, msg) in messages_by_device.items(): + for user_id, messages_by_device in edu["messages"].items(): + for device_id, msg in messages_by_device.items(): with start_active_span("store_outgoing_to_device_message"): set_tag(SynapseTags.TO_DEVICE_EDU_ID, edu["sender"]) set_tag(SynapseTags.TO_DEVICE_EDU_ID, edu["message_id"]) @@ -968,7 +968,6 @@ async def _remove_dead_devices_from_device_inbox( def _remove_dead_devices_from_device_inbox_txn( txn: LoggingTransaction, ) -> Tuple[int, bool]: - if "max_stream_id" in progress: max_stream_id = progress["max_stream_id"] else: diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 6ed699f7824e..c8504ef04fd7 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -21,6 +21,7 @@ Dict, Iterable, List, + Mapping, Optional, Set, Tuple, @@ -51,7 +52,6 @@ from synapse.storage.types import Cursor from synapse.storage.util.id_generators import ( AbstractStreamIdGenerator, - AbstractStreamIdTracker, StreamIdGenerator, ) from synapse.types import JsonDict, StrCollection, get_verify_key_from_cross_signing_key @@ -90,7 +90,7 @@ def __init__( # In the worker store this is an ID tracker which we overwrite in the non-worker # class below that is used on the main process. - self._device_list_id_gen: AbstractStreamIdTracker = StreamIdGenerator( + self._device_list_id_gen = StreamIdGenerator( db_conn, hs.get_replication_notifier(), "device_lists_stream", @@ -100,6 +100,7 @@ def __init__( ("device_lists_outbound_pokes", "stream_id"), ("device_lists_changes_in_room", "stream_id"), ("device_lists_remote_pending", "stream_id"), + ("device_lists_changes_converted_stream_position", "stream_id"), ], is_writer=hs.config.worker.worker_app is None, ) @@ -184,8 +185,6 @@ def _invalidate_caches_for_devices( self._user_signature_stream_cache.entity_has_changed(row.entity, token) continue - # NOTE: here we always tell both stream change caches, either about - # the entity or just the known position. # The entities are either user IDs (starting with '@') whose devices # have changed, or remote servers that we need to tell about # changes. 
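Several of the storage classes touched here lean on the stream-change-cache pattern visible above: call `entity_has_changed` when a write happens, and `has_entity_changed` when deciding whether a database query can be skipped. The toy class below is only a sketch of that idea under simplifying assumptions, not Synapse's actual `StreamChangeCache` implementation: it records the stream position at which each entity last changed and answers conservatively for positions older than its horizon.

```python
from typing import Dict


class ToyStreamChangeCache:
    def __init__(self, current_pos: int) -> None:
        # We only know about changes from this stream position onwards.
        self._earliest_known_pos = current_pos
        self._last_changed: Dict[str, int] = {}

    def entity_has_changed(self, entity: str, pos: int) -> None:
        # Record the most recent stream position at which the entity changed.
        self._last_changed[entity] = max(pos, self._last_changed.get(entity, 0))

    def has_entity_changed(self, entity: str, since_pos: int) -> bool:
        if since_pos < self._earliest_known_pos:
            # No history that far back: assume it may have changed.
            return True
        return self._last_changed.get(entity, 0) > since_pos


cache = ToyStreamChangeCache(current_pos=10)
cache.entity_has_changed("@alice:example.com", 12)
assert cache.has_entity_changed("@alice:example.com", 11)       # changed at 12 > 11
assert not cache.has_entity_changed("@bob:example.com", 11)     # no known change
assert cache.has_entity_changed("@bob:example.com", 5)          # before our horizon
```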
@@ -205,7 +204,9 @@ def _invalidate_caches_for_devices( def get_device_stream_token(self) -> int: return self._device_list_id_gen.get_current_token() - async def count_devices_by_users(self, user_ids: Optional[List[str]] = None) -> int: + async def count_devices_by_users( + self, user_ids: Optional[Collection[str]] = None + ) -> int: """Retrieve number of all devices of given users. Only returns number of devices that are not marked as hidden. @@ -216,7 +217,7 @@ async def count_devices_by_users(self, user_ids: Optional[List[str]] = None) -> """ def count_devices_by_users_txn( - txn: LoggingTransaction, user_ids: List[str] + txn: LoggingTransaction, user_ids: Collection[str] ) -> int: sql = """ SELECT count(*) @@ -512,7 +513,7 @@ async def get_device_updates_by_remote( results.append(("org.matrix.signing_key_update", result)) if issue_8631_logger.isEnabledFor(logging.DEBUG): - for (user_id, edu) in results: + for user_id, edu in results: issue_8631_logger.debug( "device update to %s for %s from %s to %s: %s", destination, @@ -712,9 +713,7 @@ async def add_user_signature_change_to_streams( The new stream ID. """ - # TODO: this looks like it's _writing_. Should this be on DeviceStore rather - # than DeviceWorkerStore? - async with self._device_list_id_gen.get_next() as stream_id: # type: ignore[attr-defined] + async with self._device_list_id_gen.get_next() as stream_id: await self.db_pool.runInteraction( "add_user_sig_change_to_streams", self._add_user_signature_change_txn, @@ -749,42 +748,47 @@ def _add_user_signature_change_txn( @trace @cancellable async def get_user_devices_from_cache( - self, query_list: List[Tuple[str, Optional[str]]] - ) -> Tuple[Set[str], Dict[str, Dict[str, JsonDict]]]: + self, user_ids: Set[str], user_and_device_ids: List[Tuple[str, str]] + ) -> Tuple[Set[str], Dict[str, Mapping[str, JsonDict]]]: """Get the devices (and keys if any) for remote users from the cache. Args: - query_list: List of (user_id, device_ids), if device_ids is - falsey then return all device ids for that user. + user_ids: users which should have all device IDs returned + user_and_device_ids: List of (user_id, device_ids) Returns: A tuple of (user_ids_not_in_cache, results_map), where user_ids_not_in_cache is a set of user_ids and results_map is a mapping of user_id -> device_id -> device_info. """ - user_ids = {user_id for user_id, _ in query_list} - user_map = await self.get_device_list_last_stream_id_for_remotes(list(user_ids)) + unique_user_ids = user_ids | {user_id for user_id, _ in user_and_device_ids} + user_map = await self.get_device_list_last_stream_id_for_remotes( + list(unique_user_ids) + ) # We go and check if any of the users need to have their device lists # resynced. If they do then we remove them from the cached list. users_needing_resync = await self.get_user_ids_requiring_device_list_resync( - user_ids + unique_user_ids ) user_ids_in_cache = { user_id for user_id, stream_id in user_map.items() if stream_id } - users_needing_resync - user_ids_not_in_cache = user_ids - user_ids_in_cache - - results: Dict[str, Dict[str, JsonDict]] = {} - for user_id, device_id in query_list: - if user_id not in user_ids_in_cache: - continue + user_ids_not_in_cache = unique_user_ids - user_ids_in_cache - if device_id: - device = await self._get_cached_user_device(user_id, device_id) - results.setdefault(user_id, {})[device_id] = device - else: + # First fetch all the users which all devices are to be returned. 
+ results: Dict[str, Mapping[str, JsonDict]] = {} + for user_id in user_ids: + if user_id in user_ids_in_cache: results[user_id] = await self.get_cached_devices_for_user(user_id) + # Then fetch all device-specific requests, but skip users we've already + # fetched all devices for. + device_specific_results: Dict[str, Dict[str, JsonDict]] = {} + for user_id, device_id in user_and_device_ids: + if user_id in user_ids_in_cache and user_id not in user_ids: + device = await self._get_cached_user_device(user_id, device_id) + device_specific_results.setdefault(user_id, {})[device_id] = device + results.update(device_specific_results) set_tag("in_cache", str(results)) set_tag("not_in_cache", str(user_ids_not_in_cache)) @@ -802,7 +806,7 @@ async def _get_cached_user_device(self, user_id: str, device_id: str) -> JsonDic return db_to_json(content) @cached() - async def get_cached_devices_for_user(self, user_id: str) -> Dict[str, JsonDict]: + async def get_cached_devices_for_user(self, user_id: str) -> Mapping[str, JsonDict]: devices = await self.db_pool.simple_select_list( table="device_lists_remote_cache", keyvalues={"user_id": user_id}, @@ -1311,7 +1315,7 @@ def _prune_txn(txn: LoggingTransaction) -> None: ) """ count = 0 - for (destination, user_id, stream_id, device_id) in rows: + for destination, user_id, stream_id, device_id in rows: txn.execute( delete_sql, (destination, user_id, stream_id, stream_id, device_id) ) diff --git a/synapse/storage/databases/main/directory.py b/synapse/storage/databases/main/directory.py index 5903fdaf007a..44aa181174ac 100644 --- a/synapse/storage/databases/main/directory.py +++ b/synapse/storage/databases/main/directory.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Iterable, List, Optional, Tuple +from typing import Iterable, List, Optional, Sequence, Tuple import attr @@ -74,7 +74,7 @@ async def get_room_alias_creator(self, room_alias: str) -> str: ) @cached(max_entries=5000) - async def get_aliases_for_room(self, room_id: str) -> List[str]: + async def get_aliases_for_room(self, room_id: str) -> Sequence[str]: return await self.db_pool.simple_select_onecol( "room_aliases", {"room_id": room_id}, diff --git a/synapse/storage/databases/main/e2e_room_keys.py b/synapse/storage/databases/main/e2e_room_keys.py index 6240f9a75ed3..9f8d2e4bea65 100644 --- a/synapse/storage/databases/main/e2e_room_keys.py +++ b/synapse/storage/databases/main/e2e_room_keys.py @@ -108,7 +108,7 @@ async def add_e2e_room_keys( raise StoreError(404, "No backup with that version exists") values = [] - for (room_id, session_id, room_key) in room_keys: + for room_id, session_id, room_key in room_keys: values.append( ( user_id, diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index c4ac6c33ba54..a3b6c8ae8e82 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -20,7 +20,9 @@ Dict, Iterable, List, + Mapping, Optional, + Sequence, Tuple, Union, cast, @@ -242,9 +244,7 @@ async def get_e2e_device_keys_and_signatures( set_tag("include_all_devices", include_all_devices) set_tag("include_deleted_devices", include_deleted_devices) - result = await self.db_pool.runInteraction( - "get_e2e_device_keys", - self._get_e2e_device_keys_txn, + result = await self._get_e2e_device_keys( query_list, include_all_devices, include_deleted_devices, @@ -260,13 +260,13 @@ async def get_e2e_device_keys_and_signatures( for batch in batch_iter(signature_query, 50): cross_sigs_result = await self.db_pool.runInteraction( - "get_e2e_cross_signing_signatures", + "get_e2e_cross_signing_signatures_for_devices", self._get_e2e_cross_signing_signatures_for_devices_txn, batch, ) # add each cross-signing signature to the correct device in the result dict. - for (user_id, key_id, device_id, signature) in cross_sigs_result: + for user_id, key_id, device_id, signature in cross_sigs_result: target_device_result = result[user_id][device_id] # We've only looked up cross-signatures for non-deleted devices with key # data. @@ -283,9 +283,8 @@ async def get_e2e_device_keys_and_signatures( log_kv(result) return result - def _get_e2e_device_keys_txn( + async def _get_e2e_device_keys( self, - txn: LoggingTransaction, query_list: Collection[Tuple[str, Optional[str]]], include_all_devices: bool = False, include_deleted_devices: bool = False, @@ -309,7 +308,7 @@ def _get_e2e_device_keys_txn( # devices. 
user_list = [] user_device_list = [] - for (user_id, device_id) in query_list: + for user_id, device_id in query_list: if device_id is None: user_list.append(user_id) else: @@ -317,7 +316,7 @@ def _get_e2e_device_keys_txn( if user_list: user_id_in_list_clause, user_args = make_in_list_sql_clause( - txn.database_engine, "user_id", user_list + self.database_engine, "user_id", user_list ) query_clauses.append(user_id_in_list_clause) query_params_list.append(user_args) @@ -330,13 +329,16 @@ def _get_e2e_device_keys_txn( user_device_id_in_list_clause, user_device_args, ) = make_tuple_in_list_sql_clause( - txn.database_engine, ("user_id", "device_id"), user_device_batch + self.database_engine, ("user_id", "device_id"), user_device_batch ) query_clauses.append(user_device_id_in_list_clause) query_params_list.append(user_device_args) result: Dict[str, Dict[str, Optional[DeviceKeyLookupResult]]] = {} - for query_clause, query_params in zip(query_clauses, query_params_list): + + def get_e2e_device_keys_txn( + txn: LoggingTransaction, query_clause: str, query_params: list + ) -> None: sql = ( "SELECT user_id, device_id, " " d.display_name, " @@ -351,7 +353,7 @@ def _get_e2e_device_keys_txn( txn.execute(sql, query_params) - for (user_id, device_id, display_name, key_json) in txn: + for user_id, device_id, display_name, key_json in txn: assert device_id is not None if include_deleted_devices: deleted_devices.remove((user_id, device_id)) @@ -359,6 +361,14 @@ def _get_e2e_device_keys_txn( display_name, db_to_json(key_json) if key_json else None ) + for query_clause, query_params in zip(query_clauses, query_params_list): + await self.db_pool.runInteraction( + "_get_e2e_device_keys", + get_e2e_device_keys_txn, + query_clause, + query_params, + ) + if include_deleted_devices: for user_id, device_id in deleted_devices: if device_id is None: @@ -380,7 +390,7 @@ def _get_e2e_cross_signing_signatures_for_devices_txn( signature_query_clauses = [] signature_query_params = [] - for (user_id, device_id) in device_query: + for user_id, device_id in device_query: signature_query_clauses.append( "target_user_id = ? AND target_device_id = ? AND user_id = ?" ) @@ -691,7 +701,7 @@ def _set_e2e_fallback_keys_txn( @cached(max_entries=10000) async def get_e2e_unused_fallback_key_types( self, user_id: str, device_id: str - ) -> List[str]: + ) -> Sequence[str]: """Returns the fallback key types that have an unused key. Args: @@ -731,7 +741,7 @@ async def get_e2e_cross_signing_key( return user_keys.get(key_type) @cached(num_args=1) - def _get_bare_e2e_cross_signing_keys(self, user_id: str) -> Dict[str, JsonDict]: + def _get_bare_e2e_cross_signing_keys(self, user_id: str) -> Mapping[str, JsonDict]: """Dummy function. Only used to make a cache for _get_bare_e2e_cross_signing_keys_bulk. """ @@ -744,7 +754,7 @@ def _get_bare_e2e_cross_signing_keys(self, user_id: str) -> Dict[str, JsonDict]: ) async def _get_bare_e2e_cross_signing_keys_bulk( self, user_ids: Iterable[str] - ) -> Dict[str, Optional[Dict[str, JsonDict]]]: + ) -> Dict[str, Optional[Mapping[str, JsonDict]]]: """Returns the cross-signing keys for a set of users. The output of this function should be passed to _get_e2e_cross_signing_signatures_txn if the signatures for the calling user need to be fetched. @@ -765,7 +775,7 @@ async def _get_bare_e2e_cross_signing_keys_bulk( ) # The `Optional` comes from the `@cachedList` decorator. 
- return cast(Dict[str, Optional[Dict[str, JsonDict]]], result) + return cast(Dict[str, Optional[Mapping[str, JsonDict]]], result) def _get_bare_e2e_cross_signing_keys_bulk_txn( self, @@ -924,7 +934,7 @@ def _get_e2e_cross_signing_signatures_txn( @cancellable async def get_e2e_cross_signing_keys_bulk( self, user_ids: List[str], from_user_id: Optional[str] = None - ) -> Dict[str, Optional[Dict[str, JsonDict]]]: + ) -> Dict[str, Optional[Mapping[str, JsonDict]]]: """Returns the cross-signing keys for a set of users. Args: @@ -940,11 +950,14 @@ async def get_e2e_cross_signing_keys_bulk( result = await self._get_bare_e2e_cross_signing_keys_bulk(user_ids) if from_user_id: - result = await self.db_pool.runInteraction( - "get_e2e_cross_signing_signatures", - self._get_e2e_cross_signing_signatures_txn, - result, - from_user_id, + result = cast( + Dict[str, Optional[Mapping[str, JsonDict]]], + await self.db_pool.runInteraction( + "get_e2e_cross_signing_signatures", + self._get_e2e_cross_signing_signatures_txn, + result, + from_user_id, + ), ) return result diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index bbee02ab18f0..ff3edeb7160b 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -22,6 +22,7 @@ Iterable, List, Optional, + Sequence, Set, Tuple, cast, @@ -1004,7 +1005,9 @@ def get_insertion_event_backward_extremities_in_room_txn( room_id, ) - async def get_max_depth_of(self, event_ids: List[str]) -> Tuple[Optional[str], int]: + async def get_max_depth_of( + self, event_ids: Collection[str] + ) -> Tuple[Optional[str], int]: """Returns the event ID and depth for the event that has the max depth from a set of event IDs Args: @@ -1141,7 +1144,7 @@ def _get_rooms_with_many_extremities_txn(txn: LoggingTransaction) -> List[str]: ) @cached(max_entries=5000, iterable=True) - async def get_latest_event_ids_in_room(self, room_id: str) -> List[str]: + async def get_latest_event_ids_in_room(self, room_id: str) -> Sequence[str]: return await self.db_pool.simple_select_onecol( table="event_forward_extremities", keyvalues={"room_id": room_id}, @@ -1171,7 +1174,7 @@ def _get_min_depth_interaction( @cancellable async def get_forward_extremities_for_room_at_stream_ordering( self, room_id: str, stream_ordering: int - ) -> List[str]: + ) -> Sequence[str]: """For a given room_id and stream_ordering, return the forward extremeties of the room at that point in "time". @@ -1204,7 +1207,7 @@ async def get_forward_extremities_for_room_at_stream_ordering( @cached(max_entries=5000, num_args=2) async def _get_forward_extremeties_for_room( self, room_id: str, stream_ordering: int - ) -> List[str]: + ) -> Sequence[str]: """For a given room_id and stream_ordering, return the forward extremeties of the room at that point in "time". @@ -1609,7 +1612,6 @@ def _get_missing_events( latest_events: List[str], limit: int, ) -> List[str]: - seen_events = set(earliest_events) front = set(latest_events) - seen_events event_results: List[str] = [] diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 8c1a6734a6f3..886021a889b4 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -203,11 +203,18 @@ class RoomNotifCounts: # Map of thread ID to the notification counts. 
threads: Dict[str, NotifCounts] + @staticmethod + def empty() -> "RoomNotifCounts": + return _EMPTY_ROOM_NOTIF_COUNTS + def __len__(self) -> int: # To properly account for the amount of space in any caches. return len(self.threads) + 1 +_EMPTY_ROOM_NOTIF_COUNTS = RoomNotifCounts(NotifCounts(), {}) + + def _serialize_action( actions: Collection[Union[Mapping, str]], is_highlight: bool ) -> str: diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index f4ce258ed6bf..fc9d1727c4ac 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -16,7 +16,6 @@ import itertools import logging from collections import OrderedDict -from http import HTTPStatus from typing import ( TYPE_CHECKING, Any, @@ -26,7 +25,6 @@ Iterable, List, Optional, - Sequence, Set, Tuple, ) @@ -36,7 +34,7 @@ import synapse.metrics from synapse.api.constants import EventContentFields, EventTypes, RelationTypes -from synapse.api.errors import Codes, SynapseError +from synapse.api.errors import PartialStateConflictError from synapse.api.room_versions import RoomVersions from synapse.events import EventBase, relation_from_event from synapse.events.snapshot import EventContext @@ -52,7 +50,7 @@ from synapse.storage.engines import PostgresEngine from synapse.storage.util.id_generators import AbstractStreamIdGenerator from synapse.storage.util.sequence import SequenceGenerator -from synapse.types import JsonDict, StateMap, get_domain_from_id +from synapse.types import JsonDict, StateMap, StrCollection, get_domain_from_id from synapse.util import json_encoder from synapse.util.iterutils import batch_iter, sorted_topologically from synapse.util.stringutils import non_null_str_or_none @@ -72,24 +70,6 @@ ) -class PartialStateConflictError(SynapseError): - """An internal error raised when attempting to persist an event with partial state - after the room containing the event has been un-partial stated. - - This error should be handled by recomputing the event context and trying again. - - This error has an HTTP status code so that it can be transported over replication. - It should not be exposed to clients. - """ - - def __init__(self) -> None: - super().__init__( - HTTPStatus.CONFLICT, - msg="Cannot persist partial state event in un-partial stated room", - errcode=Codes.UNKNOWN, - ) - - @attr.s(slots=True, auto_attribs=True) class DeltaState: """Deltas to use to update the `current_state_events` table. @@ -306,7 +286,7 @@ async def _get_prevs_before_rejected(self, event_ids: Iterable[str]) -> Set[str] # The set of event_ids to return. This includes all soft-failed events # and their prev events. - existing_prevs = set() + existing_prevs: Set[str] = set() def _get_prevs_before_rejected_txn( txn: LoggingTransaction, batch: Collection[str] @@ -489,7 +469,6 @@ def _persist_event_auth_chain_txn( txn: LoggingTransaction, events: List[EventBase], ) -> None: - # We only care about state events, so this if there are no state events. if not any(e.is_state() for e in events): return @@ -571,7 +550,7 @@ def _add_chain_cover_index( event_chain_id_gen: SequenceGenerator, event_to_room_id: Dict[str, str], event_to_types: Dict[str, Tuple[str, str]], - event_to_auth_chain: Dict[str, Sequence[str]], + event_to_auth_chain: Dict[str, StrCollection], ) -> None: """Calculate the chain cover index for the given events. 
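The `RoomNotifCounts.empty()` helper added in `event_push_actions.py` above hands every caller the same module-level `_EMPTY_ROOM_NOTIF_COUNTS` instance instead of constructing a fresh empty object per call, which saves allocations when many such values end up held in caches; this is only safe because callers treat the value as read-only. A minimal sketch of the pattern, with simplified dataclass stand-ins for the real attrs classes:

```python
from dataclasses import dataclass, field
from typing import Dict


@dataclass(frozen=True)
class NotifCounts:
    notify_count: int = 0
    unread_count: int = 0
    highlight_count: int = 0


@dataclass(frozen=True)
class RoomNotifCounts:
    main_timeline: NotifCounts = field(default_factory=NotifCounts)
    # Map of thread ID to the notification counts for that thread.
    threads: Dict[str, NotifCounts] = field(default_factory=dict)

    @staticmethod
    def empty() -> "RoomNotifCounts":
        # Resolved at call time, after the module-level constant exists.
        return _EMPTY_ROOM_NOTIF_COUNTS


_EMPTY_ROOM_NOTIF_COUNTS = RoomNotifCounts()

# Every "empty" result is literally the same object.
assert RoomNotifCounts.empty() is RoomNotifCounts.empty()
```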
@@ -865,7 +844,7 @@ def _allocate_chain_ids( event_chain_id_gen: SequenceGenerator, event_to_room_id: Dict[str, str], event_to_types: Dict[str, Tuple[str, str]], - event_to_auth_chain: Dict[str, Sequence[str]], + event_to_auth_chain: Dict[str, StrCollection], events_to_calc_chain_id_for: Set[str], chain_map: Dict[str, Tuple[int, int]], ) -> Dict[str, Tuple[int, int]]: @@ -2045,10 +2024,6 @@ def _handle_redact_relations( self.store._invalidate_cache_and_stream( txn, self.store.get_relations_for_event, (redacted_relates_to,) ) - if rel_type == RelationTypes.ANNOTATION: - self.store._invalidate_cache_and_stream( - txn, self.store.get_aggregation_groups_for_event, (redacted_relates_to,) - ) if rel_type == RelationTypes.REFERENCE: self.store._invalidate_cache_and_stream( txn, self.store.get_references_for_event, (redacted_relates_to,) diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index b9d3c36d602c..daef3685b09a 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -13,7 +13,7 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Set, Tuple, cast +from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, cast import attr @@ -29,7 +29,7 @@ ) from synapse.storage.databases.main.events import PersistEventsStore from synapse.storage.types import Cursor -from synapse.types import JsonDict +from synapse.types import JsonDict, StrCollection if TYPE_CHECKING: from synapse.server import HomeServer @@ -709,7 +709,7 @@ def _event_store_labels_txn(txn: LoggingTransaction) -> int: nbrows = 0 last_row_event_id = "" - for (event_id, event_json_raw) in results: + for event_id, event_json_raw in results: try: event_json = db_to_json(event_json_raw) @@ -1061,7 +1061,7 @@ def _calculate_chain_cover_txn( self.event_chain_id_gen, # type: ignore[attr-defined] event_to_room_id, event_to_types, - cast(Dict[str, Sequence[str]], event_to_auth_chain), + cast(Dict[str, StrCollection], event_to_auth_chain), ) return _CalculateChainCover( @@ -1167,7 +1167,7 @@ def _event_arbitrary_relations_txn(txn: LoggingTransaction) -> int: results = list(txn) # (event_id, parent_id, rel_type) for each relation relations_to_insert: List[Tuple[str, str, str]] = [] - for (event_id, event_json_raw) in results: + for event_id, event_json_raw in results: try: event_json = db_to_json(event_json_raw) except Exception as e: @@ -1219,9 +1219,6 @@ def _event_arbitrary_relations_txn(txn: LoggingTransaction) -> int: self._invalidate_cache_and_stream( # type: ignore[attr-defined] txn, self.get_relations_for_event, cache_tuple # type: ignore[attr-defined] ) - self._invalidate_cache_and_stream( # type: ignore[attr-defined] - txn, self.get_aggregation_groups_for_event, cache_tuple # type: ignore[attr-defined] - ) self._invalidate_cache_and_stream( # type: ignore[attr-defined] txn, self.get_thread_summary, cache_tuple # type: ignore[attr-defined] ) diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 0c419b4715a3..8e5ebd53efcc 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -73,7 +73,6 @@ from synapse.storage.types import Cursor from synapse.storage.util.id_generators import ( AbstractStreamIdGenerator, - AbstractStreamIdTracker, MultiWriterIdGenerator, StreamIdGenerator, ) @@ -189,8 +188,8 @@ def __init__( 
): super().__init__(database, db_conn, hs) - self._stream_id_gen: AbstractStreamIdTracker - self._backfill_id_gen: AbstractStreamIdTracker + self._stream_id_gen: AbstractStreamIdGenerator + self._backfill_id_gen: AbstractStreamIdGenerator if isinstance(database.engine, PostgresEngine): # If we're using Postgres than we can use `MultiWriterIdGenerator` # regardless of whether this process writes to the streams or not. @@ -820,7 +819,6 @@ async def get_missing_events_from_cache_or_db() -> Dict[ # the events have been redacted, and if so pulling the redaction event # out of the database to check it. # - missing_events = {} try: # Try to fetch from any external cache. We already checked the # in-memory cache above. @@ -1512,7 +1510,7 @@ def _fetch_event_rows( txn.execute(redactions_sql + clause, args) - for (redacter, redacted) in txn: + for redacter, redacted in txn: d = event_dict.get(redacted) if d: d.redactions.append(redacter) diff --git a/synapse/storage/databases/main/filtering.py b/synapse/storage/databases/main/filtering.py index 12f3b601f11c..8e57c8e5a07a 100644 --- a/synapse/storage/databases/main/filtering.py +++ b/synapse/storage/databases/main/filtering.py @@ -17,7 +17,7 @@ from canonicaljson import encode_canonical_json -from synapse.api.errors import Codes, SynapseError +from synapse.api.errors import Codes, StoreError, SynapseError from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import LoggingTransaction from synapse.types import JsonDict @@ -46,8 +46,6 @@ async def get_user_filter( return db_to_json(def_json) - -class FilteringStore(FilteringWorkerStore): async def add_user_filter(self, user_localpart: str, user_filter: JsonDict) -> int: def_json = encode_canonical_json(user_filter) @@ -79,4 +77,23 @@ def _do_txn(txn: LoggingTransaction) -> int: return filter_id - return await self.db_pool.runInteraction("add_user_filter", _do_txn) + attempts = 0 + while True: + # Try a few times. + # This is technically needed if a user tries to create two filters at once, + # leading to two concurrent transactions. + # The failure case would be: + # - SELECT filter_id ... filter_json = ? → both transactions return no rows + # - SELECT MAX(filter_id) ... → both transactions return e.g. 5 + # - INSERT INTO ... → both transactions insert filter_id = 6 + # One of the transactions will commit. The other will get a unique key + # constraint violation error (IntegrityError). This is not the same as a + # serialisability violation, which would be automatically retried by + # `runInteraction`. 
+ try: + return await self.db_pool.runInteraction("add_user_filter", _do_txn) + except self.db_pool.engine.module.IntegrityError: + attempts += 1 + + if attempts >= 5: + raise StoreError(500, "Couldn't generate a filter ID.") diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index b202c5eb87a6..fa8be214ce90 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -196,7 +196,6 @@ async def get_local_media_by_user_paginate( def get_local_media_by_user_paginate_txn( txn: LoggingTransaction, ) -> Tuple[List[Dict[str, Any]], int]: - # Set ordering order_by_column = MediaSortOrder(order_by).value diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index db9a24db5ec6..4b1061e6d7d0 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, cast +from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Tuple, cast from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage.database import ( @@ -95,7 +95,7 @@ def _count_users(txn: LoggingTransaction) -> int: return await self.db_pool.runInteraction("count_users", _count_users) @cached(num_args=0) - async def get_monthly_active_count_by_service(self) -> Dict[str, int]: + async def get_monthly_active_count_by_service(self) -> Mapping[str, int]: """Generates current count of monthly active users broken down by service. A service is typically an appservice but also includes native matrix users. Since the `monthly_active_users` table is populated from the `user_ips` table diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index 9213ce0b5abb..7a7c0d9c753d 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -325,6 +325,7 @@ async def purge_room(self, room_id: str) -> List[int]: # We then run the same purge a second time without this isolation level to # purge any of those rows which were added during the first. 
+ logger.info("[purge] Starting initial main purge of [1/2]") state_groups_to_delete = await self.db_pool.runInteraction( "purge_room", self._purge_room_txn, @@ -332,6 +333,7 @@ async def purge_room(self, room_id: str) -> List[int]: isolation_level=IsolationLevel.READ_COMMITTED, ) + logger.info("[purge] Starting secondary main purge of [2/2]") state_groups_to_delete.extend( await self.db_pool.runInteraction( "purge_room", @@ -339,6 +341,7 @@ async def purge_room(self, room_id: str) -> List[int]: room_id=room_id, ), ) + logger.info("[purge] Done with main purge") return state_groups_to_delete @@ -376,7 +379,7 @@ def _purge_room_txn(self, txn: LoggingTransaction, room_id: str) -> List[int]: ) referenced_chain_id_tuples = list(txn) - logger.info("[purge] removing events from event_auth_chain_links") + logger.info("[purge] removing from event_auth_chain_links") txn.executemany( """ DELETE FROM event_auth_chain_links WHERE @@ -399,7 +402,7 @@ def _purge_room_txn(self, txn: LoggingTransaction, room_id: str) -> List[int]: "rejections", "state_events", ): - logger.info("[purge] removing %s from %s", room_id, table) + logger.info("[purge] removing from %s", table) txn.execute( """ @@ -420,12 +423,14 @@ def _purge_room_txn(self, txn: LoggingTransaction, room_id: str) -> List[int]: "event_push_actions", "event_search", "event_failed_pull_attempts", + # Note: the partial state tables have foreign keys between each other, and to + # `events` and `rooms`. We need to delete from them in the right order. "partial_state_events", + "partial_state_rooms_servers", + "partial_state_rooms", "events", "federation_inbound_events_staging", "local_current_membership", - "partial_state_rooms_servers", - "partial_state_rooms", "receipts_graph", "receipts_linearized", "room_aliases", @@ -452,7 +457,7 @@ def _purge_room_txn(self, txn: LoggingTransaction, room_id: str) -> List[int]: # happy "rooms", ): - logger.info("[purge] removing %s from %s", room_id, table) + logger.info("[purge] removing from %s", table) txn.execute("DELETE FROM %s WHERE room_id=?" % (table,), (room_id,)) # Other tables we do NOT need to clear out: @@ -484,6 +489,4 @@ def _purge_room_txn(self, txn: LoggingTransaction, room_id: str) -> List[int]: # that already exist. self._invalidate_cache_and_stream(txn, self.have_seen_event, (room_id,)) - logger.info("[purge] done") - return state_groups diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index ced9e231f625..4bdda0724056 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -46,7 +46,6 @@ from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException from synapse.storage.util.id_generators import ( AbstractStreamIdGenerator, - AbstractStreamIdTracker, IdGenerator, StreamIdGenerator, ) @@ -118,7 +117,7 @@ def __init__( # In the worker store this is an ID tracker which we overwrite in the non-worker # class below that is used on the main process. 
- self._push_rules_stream_id_gen: AbstractStreamIdTracker = StreamIdGenerator( + self._push_rules_stream_id_gen = StreamIdGenerator( db_conn, hs.get_replication_notifier(), "push_rules_stream", diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index df53e726e62a..9a24f7a65526 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -36,7 +36,6 @@ ) from synapse.storage.util.id_generators import ( AbstractStreamIdGenerator, - AbstractStreamIdTracker, StreamIdGenerator, ) from synapse.types import JsonDict @@ -60,7 +59,7 @@ def __init__( # In the worker store this is an ID tracker which we overwrite in the non-worker # class below that is used on the main process. - self._pushers_id_gen: AbstractStreamIdTracker = StreamIdGenerator( + self._pushers_id_gen = StreamIdGenerator( db_conn, hs.get_replication_notifier(), "pushers", @@ -344,7 +343,6 @@ async def _remove_deactivated_pushers(self, progress: dict, batch_size: int) -> last_user = progress.get("last_user", "") def _delete_pushers(txn: LoggingTransaction) -> int: - sql = """ SELECT name FROM users WHERE deactivated = ? and name > ? @@ -392,7 +390,6 @@ async def _remove_stale_pushers(self, progress: dict, batch_size: int) -> int: last_pusher = progress.get("last_pusher", 0) def _delete_pushers(txn: LoggingTransaction) -> int: - sql = """ SELECT p.id, access_token FROM pushers AS p LEFT JOIN access_tokens AS a ON (p.access_token = a.id) @@ -449,7 +446,6 @@ async def _remove_deleted_email_pushers( last_pusher = progress.get("last_pusher", 0) def _delete_pushers(txn: LoggingTransaction) -> int: - sql = """ SELECT p.id, p.user_name, p.app_id, p.pushkey FROM pushers AS p diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index b2048fb167e1..dfa84deec5d7 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -21,7 +21,9 @@ Dict, Iterable, List, + Mapping, Optional, + Sequence, Tuple, cast, ) @@ -39,7 +41,7 @@ from synapse.storage.engines import PostgresEngine from synapse.storage.engines._base import IsolationLevel from synapse.storage.util.id_generators import ( - AbstractStreamIdTracker, + AbstractStreamIdGenerator, MultiWriterIdGenerator, StreamIdGenerator, ) @@ -65,7 +67,7 @@ def __init__( # In the worker store this is an ID tracker which we overwrite in the non-worker # class below that is used on the main process. - self._receipts_id_gen: AbstractStreamIdTracker + self._receipts_id_gen: AbstractStreamIdGenerator if isinstance(database.engine, PostgresEngine): self._can_write_to_receipts = ( @@ -301,7 +303,7 @@ async def get_linearized_receipts_for_rooms( async def get_linearized_receipts_for_room( self, room_id: str, to_key: int, from_key: Optional[int] = None - ) -> List[dict]: + ) -> Sequence[JsonDict]: """Get receipts for a single room for sending to clients. 
Args: @@ -326,7 +328,7 @@ async def get_linearized_receipts_for_room( @cached(tree=True) async def _get_linearized_receipts_for_room( self, room_id: str, to_key: int, from_key: Optional[int] = None - ) -> List[JsonDict]: + ) -> Sequence[JsonDict]: """See get_linearized_receipts_for_room""" def f(txn: LoggingTransaction) -> List[Dict[str, Any]]: @@ -369,7 +371,7 @@ def f(txn: LoggingTransaction) -> List[Dict[str, Any]]: ) async def _get_linearized_receipts_for_rooms( self, room_ids: Collection[str], to_key: int, from_key: Optional[int] = None - ) -> Dict[str, List[JsonDict]]: + ) -> Dict[str, Sequence[JsonDict]]: if not room_ids: return {} @@ -432,7 +434,7 @@ def f(txn: LoggingTransaction) -> List[Dict[str, Any]]: # ) async def get_linearized_receipts_for_all_rooms( self, to_key: int, from_key: Optional[int] = None - ) -> Dict[str, JsonDict]: + ) -> Mapping[str, JsonDict]: """Get receipts for all rooms between two stream_ids, up to a limit of the latest 100 read receipts. @@ -791,7 +793,7 @@ async def insert_receipt( "insert_receipt_conv", self._graph_to_linear, room_id, event_ids ) - async with self._receipts_id_gen.get_next() as stream_id: # type: ignore[attr-defined] + async with self._receipts_id_gen.get_next() as stream_id: event_ts = await self.db_pool.runInteraction( "insert_linearized_receipt", self._insert_linearized_receipt_txn, @@ -911,7 +913,6 @@ async def _populate_receipt_event_stream_ordering( def _populate_receipt_event_stream_ordering_txn( txn: LoggingTransaction, ) -> bool: - if "max_stream_id" in progress: max_stream_id = progress["max_stream_id"] else: diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 31f0f2bd3d84..717237e02496 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -16,7 +16,7 @@ import logging import random import re -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union, cast +from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple, Union, cast import attr @@ -192,7 +192,7 @@ def __init__( ) @cached() - async def get_user_by_id(self, user_id: str) -> Optional[Dict[str, Any]]: + async def get_user_by_id(self, user_id: str) -> Optional[Mapping[str, Any]]: """Deprecated: use get_userinfo_by_id instead""" def get_user_by_id_txn(txn: LoggingTransaction) -> Optional[Dict[str, Any]]: @@ -1002,19 +1002,6 @@ async def user_delete_threepid( desc="user_delete_threepid", ) - async def user_delete_threepids(self, user_id: str) -> None: - """Delete all threepid this user has bound - - Args: - user_id: The user id to delete all threepids of - - """ - await self.db_pool.simple_delete( - "user_threepids", - keyvalues={"user_id": user_id}, - desc="user_delete_threepids", - ) - async def add_user_bound_threepid( self, user_id: str, medium: str, address: str, id_server: str ) -> None: diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 0018d6f7abcb..bc3a83919c34 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -22,6 +22,7 @@ List, Mapping, Optional, + Sequence, Set, Tuple, Union, @@ -171,7 +172,7 @@ async def get_relations_for_event( direction: Direction = Direction.BACKWARDS, from_token: Optional[StreamToken] = None, to_token: Optional[StreamToken] = None, - ) -> Tuple[List[_RelatedEvent], Optional[StreamToken]]: + ) -> Tuple[Sequence[_RelatedEvent], Optional[StreamToken]]: """Get a list of 
relations for an event, ordered by topological ordering. Args: @@ -396,141 +397,6 @@ async def event_is_target_of_relation(self, parent_id: str) -> bool: ) return result is not None - @cached() - async def get_aggregation_groups_for_event(self, event_id: str) -> List[JsonDict]: - raise NotImplementedError() - - @cachedList( - cached_method_name="get_aggregation_groups_for_event", list_name="event_ids" - ) - async def get_aggregation_groups_for_events( - self, event_ids: Collection[str] - ) -> Mapping[str, Optional[List[JsonDict]]]: - """Get a list of annotations on the given events, grouped by event type and - aggregation key, sorted by count. - - This is used e.g. to get the what and how many reactions have happend - on an event. - - Args: - event_ids: Fetch events that relate to these event IDs. - - Returns: - A map of event IDs to a list of groups of annotations that match. - Each entry is a dict with `type`, `key` and `count` fields. - """ - # The number of entries to return per event ID. - limit = 5 - - clause, args = make_in_list_sql_clause( - self.database_engine, "relates_to_id", event_ids - ) - args.append(RelationTypes.ANNOTATION) - - sql = f""" - SELECT - relates_to_id, - annotation.type, - aggregation_key, - COUNT(DISTINCT annotation.sender) - FROM events AS annotation - INNER JOIN event_relations USING (event_id) - INNER JOIN events AS parent ON - parent.event_id = relates_to_id - AND parent.room_id = annotation.room_id - WHERE - {clause} - AND relation_type = ? - GROUP BY relates_to_id, annotation.type, aggregation_key - ORDER BY relates_to_id, COUNT(*) DESC - """ - - def _get_aggregation_groups_for_events_txn( - txn: LoggingTransaction, - ) -> Mapping[str, List[JsonDict]]: - txn.execute(sql, args) - - result: Dict[str, List[JsonDict]] = {} - for event_id, type, key, count in cast( - List[Tuple[str, str, str, int]], txn - ): - event_results = result.setdefault(event_id, []) - - # Limit the number of results per event ID. - if len(event_results) == limit: - continue - - event_results.append({"type": type, "key": key, "count": count}) - - return result - - return await self.db_pool.runInteraction( - "get_aggregation_groups_for_events", _get_aggregation_groups_for_events_txn - ) - - async def get_aggregation_groups_for_users( - self, event_ids: Collection[str], users: FrozenSet[str] - ) -> Dict[str, Dict[Tuple[str, str], int]]: - """Fetch the partial aggregations for an event for specific users. - - This is used, in conjunction with get_aggregation_groups_for_event, to - remove information from the results for ignored users. - - Args: - event_ids: Fetch events that relate to these event IDs. - users: The users to fetch information for. - - Returns: - A map of event ID to a map of (event type, aggregation key) to a - count of users. - """ - - if not users: - return {} - - events_sql, args = make_in_list_sql_clause( - self.database_engine, "relates_to_id", event_ids - ) - - users_sql, users_args = make_in_list_sql_clause( - self.database_engine, "annotation.sender", users - ) - args.extend(users_args) - args.append(RelationTypes.ANNOTATION) - - sql = f""" - SELECT - relates_to_id, - annotation.type, - aggregation_key, - COUNT(DISTINCT annotation.sender) - FROM events AS annotation - INNER JOIN event_relations USING (event_id) - INNER JOIN events AS parent ON - parent.event_id = relates_to_id - AND parent.room_id = annotation.room_id - WHERE {events_sql} AND {users_sql} AND relation_type = ? 
- GROUP BY relates_to_id, annotation.type, aggregation_key - ORDER BY relates_to_id, COUNT(*) DESC - """ - - def _get_aggregation_groups_for_users_txn( - txn: LoggingTransaction, - ) -> Dict[str, Dict[Tuple[str, str], int]]: - txn.execute(sql, args) - - result: Dict[str, Dict[Tuple[str, str], int]] = {} - for event_id, type, key, count in cast( - List[Tuple[str, str, str, int]], txn - ): - result.setdefault(event_id, {})[(type, key)] = count - - return result - - return await self.db_pool.runInteraction( - "get_aggregation_groups_for_users", _get_aggregation_groups_for_users_txn - ) - @cached() async def get_references_for_event(self, event_id: str) -> List[JsonDict]: raise NotImplementedError() diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 644bbb88788f..3825bd60797c 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -1417,6 +1417,204 @@ def get_un_partial_stated_rooms_from_stream_txn( get_un_partial_stated_rooms_from_stream_txn, ) + async def get_event_report(self, report_id: int) -> Optional[Dict[str, Any]]: + """Retrieve an event report + + Args: + report_id: ID of reported event in database + Returns: + JSON dict of information from an event report or None if the + report does not exist. + """ + + def _get_event_report_txn( + txn: LoggingTransaction, report_id: int + ) -> Optional[Dict[str, Any]]: + sql = """ + SELECT + er.id, + er.received_ts, + er.room_id, + er.event_id, + er.user_id, + er.content, + events.sender, + room_stats_state.canonical_alias, + room_stats_state.name, + event_json.json AS event_json + FROM event_reports AS er + LEFT JOIN events + ON events.event_id = er.event_id + JOIN event_json + ON event_json.event_id = er.event_id + JOIN room_stats_state + ON room_stats_state.room_id = er.room_id + WHERE er.id = ? + """ + + txn.execute(sql, [report_id]) + row = txn.fetchone() + + if not row: + return None + + event_report = { + "id": row[0], + "received_ts": row[1], + "room_id": row[2], + "event_id": row[3], + "user_id": row[4], + "score": db_to_json(row[5]).get("score"), + "reason": db_to_json(row[5]).get("reason"), + "sender": row[6], + "canonical_alias": row[7], + "name": row[8], + "event_json": db_to_json(row[9]), + } + + return event_report + + return await self.db_pool.runInteraction( + "get_event_report", _get_event_report_txn, report_id + ) + + async def get_event_reports_paginate( + self, + start: int, + limit: int, + direction: Direction = Direction.BACKWARDS, + user_id: Optional[str] = None, + room_id: Optional[str] = None, + ) -> Tuple[List[Dict[str, Any]], int]: + """Retrieve a paginated list of event reports + + Args: + start: event offset to begin the query from + limit: number of rows to retrieve + direction: Whether to fetch the most recent first (backwards) or the + oldest first (forwards) + user_id: search for user_id. Ignored if user_id is None + room_id: search for room_id. 
Ignored if room_id is None + Returns: + Tuple of: + json list of event reports + total number of event reports matching the filter criteria + """ + + def _get_event_reports_paginate_txn( + txn: LoggingTransaction, + ) -> Tuple[List[Dict[str, Any]], int]: + filters = [] + args: List[object] = [] + + if user_id: + filters.append("er.user_id LIKE ?") + args.extend(["%" + user_id + "%"]) + if room_id: + filters.append("er.room_id LIKE ?") + args.extend(["%" + room_id + "%"]) + + if direction == Direction.BACKWARDS: + order = "DESC" + else: + order = "ASC" + + where_clause = "WHERE " + " AND ".join(filters) if len(filters) > 0 else "" + + # We join on room_stats_state despite not using any columns from it + # because the join can influence the number of rows returned; + # e.g. a room that doesn't have state, maybe because it was deleted. + # The query returning the total count should be consistent with + # the query returning the results. + sql = """ + SELECT COUNT(*) as total_event_reports + FROM event_reports AS er + JOIN room_stats_state ON room_stats_state.room_id = er.room_id + {} + """.format( + where_clause + ) + txn.execute(sql, args) + count = cast(Tuple[int], txn.fetchone())[0] + + sql = """ + SELECT + er.id, + er.received_ts, + er.room_id, + er.event_id, + er.user_id, + er.content, + events.sender, + room_stats_state.canonical_alias, + room_stats_state.name + FROM event_reports AS er + LEFT JOIN events + ON events.event_id = er.event_id + JOIN room_stats_state + ON room_stats_state.room_id = er.room_id + {where_clause} + ORDER BY er.received_ts {order} + LIMIT ? + OFFSET ? + """.format( + where_clause=where_clause, + order=order, + ) + + args += [limit, start] + txn.execute(sql, args) + + event_reports = [] + for row in txn: + try: + s = db_to_json(row[5]).get("score") + r = db_to_json(row[5]).get("reason") + except Exception: + logger.error("Unable to parse json from event_reports: %s", row[0]) + continue + event_reports.append( + { + "id": row[0], + "received_ts": row[1], + "room_id": row[2], + "event_id": row[3], + "user_id": row[4], + "score": s, + "reason": r, + "sender": row[6], + "canonical_alias": row[7], + "name": row[8], + } + ) + + return event_reports, count + + return await self.db_pool.runInteraction( + "get_event_reports_paginate", _get_event_reports_paginate_txn + ) + + async def delete_event_report(self, report_id: int) -> bool: + """Remove an event report from database. + + Args: + report_id: Report to delete + + Returns: + Whether the report was successfully deleted or not. + """ + try: + await self.db_pool.simple_delete_one( + table="event_reports", + keyvalues={"id": report_id}, + desc="delete_event_report", + ) + except StoreError: + # Deletion failed because report does not exist + return False + + return True + class _BackgroundUpdates: REMOVE_TOMESTONED_ROOMS_BG_UPDATE = "remove_tombstoned_rooms_from_directory" @@ -2139,7 +2337,19 @@ async def add_event_report( reason: Optional[str], content: JsonDict, received_ts: int, - ) -> None: + ) -> int: + """Add an event report + + Args: + room_id: Room that contains the reported event. + event_id: The reported event. + user_id: User who reports the event. + reason: Description that the user specifies. + content: Report request body (score and reason). + received_ts: Time when the user submitted the report (milliseconds). + Returns: + Id of the event report. 
+ """ next_id = self._event_reports_id_gen.get_next() await self.db_pool.simple_insert( table="event_reports", @@ -2154,184 +2364,7 @@ async def add_event_report( }, desc="add_event_report", ) - - async def get_event_report(self, report_id: int) -> Optional[Dict[str, Any]]: - """Retrieve an event report - - Args: - report_id: ID of reported event in database - Returns: - JSON dict of information from an event report or None if the - report does not exist. - """ - - def _get_event_report_txn( - txn: LoggingTransaction, report_id: int - ) -> Optional[Dict[str, Any]]: - - sql = """ - SELECT - er.id, - er.received_ts, - er.room_id, - er.event_id, - er.user_id, - er.content, - events.sender, - room_stats_state.canonical_alias, - room_stats_state.name, - event_json.json AS event_json - FROM event_reports AS er - LEFT JOIN events - ON events.event_id = er.event_id - JOIN event_json - ON event_json.event_id = er.event_id - JOIN room_stats_state - ON room_stats_state.room_id = er.room_id - WHERE er.id = ? - """ - - txn.execute(sql, [report_id]) - row = txn.fetchone() - - if not row: - return None - - event_report = { - "id": row[0], - "received_ts": row[1], - "room_id": row[2], - "event_id": row[3], - "user_id": row[4], - "score": db_to_json(row[5]).get("score"), - "reason": db_to_json(row[5]).get("reason"), - "sender": row[6], - "canonical_alias": row[7], - "name": row[8], - "event_json": db_to_json(row[9]), - } - - return event_report - - return await self.db_pool.runInteraction( - "get_event_report", _get_event_report_txn, report_id - ) - - async def get_event_reports_paginate( - self, - start: int, - limit: int, - direction: Direction = Direction.BACKWARDS, - user_id: Optional[str] = None, - room_id: Optional[str] = None, - ) -> Tuple[List[Dict[str, Any]], int]: - """Retrieve a paginated list of event reports - - Args: - start: event offset to begin the query from - limit: number of rows to retrieve - direction: Whether to fetch the most recent first (backwards) or the - oldest first (forwards) - user_id: search for user_id. Ignored if user_id is None - room_id: search for room_id. Ignored if room_id is None - Returns: - Tuple of: - json list of event reports - total number of event reports matching the filter criteria - """ - - def _get_event_reports_paginate_txn( - txn: LoggingTransaction, - ) -> Tuple[List[Dict[str, Any]], int]: - filters = [] - args: List[object] = [] - - if user_id: - filters.append("er.user_id LIKE ?") - args.extend(["%" + user_id + "%"]) - if room_id: - filters.append("er.room_id LIKE ?") - args.extend(["%" + room_id + "%"]) - - if direction == Direction.BACKWARDS: - order = "DESC" - else: - order = "ASC" - - where_clause = "WHERE " + " AND ".join(filters) if len(filters) > 0 else "" - - # We join on room_stats_state despite not using any columns from it - # because the join can influence the number of rows returned; - # e.g. a room that doesn't have state, maybe because it was deleted. - # The query returning the total count should be consistent with - # the query returning the results. 
- sql = """ - SELECT COUNT(*) as total_event_reports - FROM event_reports AS er - JOIN room_stats_state ON room_stats_state.room_id = er.room_id - {} - """.format( - where_clause - ) - txn.execute(sql, args) - count = cast(Tuple[int], txn.fetchone())[0] - - sql = """ - SELECT - er.id, - er.received_ts, - er.room_id, - er.event_id, - er.user_id, - er.content, - events.sender, - room_stats_state.canonical_alias, - room_stats_state.name - FROM event_reports AS er - LEFT JOIN events - ON events.event_id = er.event_id - JOIN room_stats_state - ON room_stats_state.room_id = er.room_id - {where_clause} - ORDER BY er.received_ts {order} - LIMIT ? - OFFSET ? - """.format( - where_clause=where_clause, - order=order, - ) - - args += [limit, start] - txn.execute(sql, args) - - event_reports = [] - for row in txn: - try: - s = db_to_json(row[5]).get("score") - r = db_to_json(row[5]).get("reason") - except Exception: - logger.error("Unable to parse json from event_reports: %s", row[0]) - continue - event_reports.append( - { - "id": row[0], - "received_ts": row[1], - "room_id": row[2], - "event_id": row[3], - "user_id": row[4], - "score": s, - "reason": r, - "sender": row[6], - "canonical_alias": row[7], - "name": row[8], - } - ) - - return event_reports, count - - return await self.db_pool.runInteraction( - "get_event_reports_paginate", _get_event_reports_paginate_txn - ) + return next_id async def block_room(self, room_id: str, user_id: str) -> None: """Marks the room as blocked. diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 3abc4dcfef1a..1a7d5dd4335a 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -25,6 +25,7 @@ List, Mapping, Optional, + Sequence, Set, Tuple, Union, @@ -157,7 +158,7 @@ def _transact(txn: LoggingTransaction) -> int: return self._known_servers_count @cached(max_entries=100000, iterable=True) - async def get_users_in_room(self, room_id: str) -> List[str]: + async def get_users_in_room(self, room_id: str) -> Sequence[str]: """Returns a list of users in the room. Will return inaccurate results for rooms with partial state, since the state for @@ -194,9 +195,7 @@ def get_users_in_room_txn(self, txn: LoggingTransaction, room_id: str) -> List[s ) @cached() - def get_user_in_room_with_profile( - self, room_id: str, user_id: str - ) -> Dict[str, ProfileInfo]: + def get_user_in_room_with_profile(self, room_id: str, user_id: str) -> ProfileInfo: raise NotImplementedError() @cachedList( @@ -250,7 +249,7 @@ def _get_subset_users_in_room_with_profiles( @cached(max_entries=100000, iterable=True) async def get_users_in_room_with_profiles( self, room_id: str - ) -> Dict[str, ProfileInfo]: + ) -> Mapping[str, ProfileInfo]: """Get a mapping from user ID to profile information for all users in a given room. The profile information comes directly from this room's `m.room.member` @@ -289,7 +288,7 @@ def _get_users_in_room_with_profiles( ) @cached(max_entries=100000) - async def get_room_summary(self, room_id: str) -> Dict[str, MemberSummary]: + async def get_room_summary(self, room_id: str) -> Mapping[str, MemberSummary]: """Get the details of a room roughly suitable for use by the room summary extension to /sync. Useful when lazy loading room members. 
Args: @@ -380,7 +379,7 @@ def _get_number_joined_non_bot_users_in_room_txn( @cached() async def get_invited_rooms_for_local_user( self, user_id: str - ) -> List[RoomsForUser]: + ) -> Sequence[RoomsForUser]: """Get all the rooms the *local* user is invited to. Args: @@ -498,7 +497,7 @@ def _get_rooms_for_local_user_where_membership_is_txn( return results @cached(iterable=True) - async def get_local_users_in_room(self, room_id: str) -> List[str]: + async def get_local_users_in_room(self, room_id: str) -> Sequence[str]: """ Retrieves a list of the current roommembers who are local to the server. """ @@ -814,7 +813,7 @@ async def get_users_who_share_room_with_user(self, user_id: str) -> Set[str]: """Returns the set of users who share a room with `user_id`""" room_ids = await self.get_rooms_for_user(user_id) - user_who_share_room = set() + user_who_share_room: Set[str] = set() for room_id in room_ids: user_ids = await self.get_users_in_room(room_id) user_who_share_room.update(user_ids) @@ -976,7 +975,7 @@ async def _check_host_room_membership( return True @cached(iterable=True, max_entries=10000) - async def get_current_hosts_in_room(self, room_id: str) -> Set[str]: + async def get_current_hosts_in_room(self, room_id: str) -> AbstractSet[str]: """Get current hosts in room based on current state.""" # First we check if we already have `get_users_in_room` in the cache, as diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index 3fe433f66ccc..a7aae661d860 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -122,7 +122,6 @@ def store_search_entries_txn( class SearchBackgroundUpdateStore(SearchWorkerStore): - EVENT_SEARCH_UPDATE_NAME = "event_search" EVENT_SEARCH_ORDER_UPDATE_NAME = "event_search_order" EVENT_SEARCH_USE_GIN_POSTGRES_NAME = "event_search_postgres_gin" @@ -615,7 +614,6 @@ async def search_rooms( """ count_args = [search_query] + count_args elif isinstance(self.database_engine, Sqlite3Engine): - # We use CROSS JOIN here to ensure we use the right indexes. # https://sqlite.org/optoverview.html#crossjoin # diff --git a/synapse/storage/databases/main/signatures.py b/synapse/storage/databases/main/signatures.py index 05da15074a73..5dcb1fc0b5f4 100644 --- a/synapse/storage/databases/main/signatures.py +++ b/synapse/storage/databases/main/signatures.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Collection, Dict, List, Tuple +from typing import Collection, Dict, List, Mapping, Tuple from unpaddedbase64 import encode_base64 @@ -26,7 +26,7 @@ class SignatureWorkerStore(EventsWorkerStore): @cached() - def get_event_reference_hash(self, event_id: str) -> Dict[str, Dict[str, bytes]]: + def get_event_reference_hash(self, event_id: str) -> Mapping[str, bytes]: # This is a dummy function to allow get_event_reference_hashes # to use its cache raise NotImplementedError() @@ -36,7 +36,7 @@ def get_event_reference_hash(self, event_id: str) -> Dict[str, Dict[str, bytes]] ) async def get_event_reference_hashes( self, event_ids: Collection[str] - ) -> Dict[str, Dict[str, bytes]]: + ) -> Mapping[str, Mapping[str, bytes]]: """Get all hashes for given events. 
Args: diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index ba325d390b58..ebb2ae964f5a 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -490,7 +490,6 @@ def _update_state_for_partial_state_event_txn( class MainStateBackgroundUpdateStore(RoomMemberWorkerStore): - CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx" EVENT_STATE_GROUP_INDEX_UPDATE_NAME = "event_to_state_groups_sg_index" DELETE_CURRENT_STATE_UPDATE_NAME = "delete_old_current_state_events" diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index d7b7d0c3c909..d3393d8e493e 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -461,7 +461,7 @@ def _upsert_with_additive_relatives_txn( insert_cols = [] qargs = [] - for (key, val) in chain( + for key, val in chain( keyvalues.items(), absolutes.items(), additive_relatives.items() ): insert_cols.append(key) diff --git a/synapse/storage/databases/main/tags.py b/synapse/storage/databases/main/tags.py index d5500cdd470c..c149a9eacba7 100644 --- a/synapse/storage/databases/main/tags.py +++ b/synapse/storage/databases/main/tags.py @@ -15,7 +15,7 @@ # limitations under the License. import logging -from typing import Any, Dict, Iterable, List, Tuple, cast +from typing import Any, Dict, Iterable, List, Mapping, Tuple, cast from synapse.api.constants import AccountDataTypes from synapse.replication.tcp.streams import AccountDataStream @@ -32,7 +32,9 @@ class TagsWorkerStore(AccountDataWorkerStore): @cached() - async def get_tags_for_user(self, user_id: str) -> Dict[str, Dict[str, JsonDict]]: + async def get_tags_for_user( + self, user_id: str + ) -> Mapping[str, Mapping[str, JsonDict]]: """Get all the tags for a user. @@ -107,7 +109,7 @@ def get_all_updated_tags_txn( async def get_updated_tags( self, user_id: str, stream_id: int - ) -> Dict[str, Dict[str, JsonDict]]: + ) -> Mapping[str, Mapping[str, JsonDict]]: """Get all the tags for the rooms where the tags have changed since the given version diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index 6b33d809b6cf..c3bd36efc9c2 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -224,7 +224,7 @@ async def set_destination_retry_timings( await self.db_pool.runInteraction( "set_destination_retry_timings", - self._set_destination_retry_timings_native, + self._set_destination_retry_timings_txn, destination, failure_ts, retry_last_ts, @@ -232,7 +232,7 @@ async def set_destination_retry_timings( db_autocommit=True, # Safe as it's a single upsert ) - def _set_destination_retry_timings_native( + def _set_destination_retry_timings_txn( self, txn: LoggingTransaction, destination: str, @@ -266,58 +266,6 @@ def _set_destination_retry_timings_native( txn, self.get_destination_retry_timings, (destination,) ) - def _set_destination_retry_timings_emulated( - self, - txn: LoggingTransaction, - destination: str, - failure_ts: Optional[int], - retry_last_ts: int, - retry_interval: int, - ) -> None: - self.database_engine.lock_table(txn, "destinations") - - # We need to be careful here as the data may have changed from under us - # due to a worker setting the timings. 
- - prev_row = self.db_pool.simple_select_one_txn( - txn, - table="destinations", - keyvalues={"destination": destination}, - retcols=("failure_ts", "retry_last_ts", "retry_interval"), - allow_none=True, - ) - - if not prev_row: - self.db_pool.simple_insert_txn( - txn, - table="destinations", - values={ - "destination": destination, - "failure_ts": failure_ts, - "retry_last_ts": retry_last_ts, - "retry_interval": retry_interval, - }, - ) - elif ( - retry_interval == 0 - or prev_row["retry_interval"] is None - or prev_row["retry_interval"] < retry_interval - ): - self.db_pool.simple_update_one_txn( - txn, - "destinations", - keyvalues={"destination": destination}, - updatevalues={ - "failure_ts": failure_ts, - "retry_last_ts": retry_last_ts, - "retry_interval": retry_interval, - }, - ) - - self._invalidate_cache_and_stream( - txn, self.get_destination_retry_timings, (destination,) - ) - async def store_destination_rooms_entries( self, destinations: Iterable[str], @@ -573,7 +521,6 @@ async def get_destination_rooms_paginate( def get_destination_rooms_paginate_txn( txn: LoggingTransaction, ) -> Tuple[List[JsonDict], int]: - if direction == Direction.BACKWARDS: order = "DESC" else: diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index 14ef5b040db1..97f09b73dde6 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -14,11 +14,12 @@ import logging import re +import unicodedata from typing import ( TYPE_CHECKING, - Dict, Iterable, List, + Mapping, Optional, Sequence, Set, @@ -53,6 +54,7 @@ from synapse.storage.engines import PostgresEngine, Sqlite3Engine from synapse.types import ( JsonDict, + UserID, UserProfile, get_domain_from_id, get_localpart_from_id, @@ -98,7 +100,6 @@ def __init__( async def _populate_user_directory_createtables( self, progress: JsonDict, batch_size: int ) -> int: - # Get all the rooms that we want to process. def _make_staging_area(txn: LoggingTransaction) -> None: sql = ( @@ -473,11 +474,116 @@ async def is_room_world_readable_or_publicly_joinable(self, room_id: str) -> boo return False + async def set_remote_user_profile_in_user_dir_stale( + self, user_id: str, next_try_at_ms: int, retry_counter: int + ) -> None: + """ + Marks a remote user as having a possibly-stale user directory profile. + + Args: + user_id: the remote user who may have a stale profile on this server. + next_try_at_ms: timestamp in ms after which the user directory profile can be + refreshed. + retry_counter: number of failures in refreshing the profile so far. Used for + exponential backoff calculations. + """ + assert not self.hs.is_mine_id( + user_id + ), "Can't mark a local user as a stale remote user." + + server_name = UserID.from_string(user_id).domain + + await self.db_pool.simple_upsert( + table="user_directory_stale_remote_users", + keyvalues={"user_id": user_id}, + values={ + "next_try_at_ts": next_try_at_ms, + "retry_counter": retry_counter, + "user_server_name": server_name, + }, + desc="set_remote_user_profile_in_user_dir_stale", + ) + + async def clear_remote_user_profile_in_user_dir_stale(self, user_id: str) -> None: + """ + Marks a remote user as no longer having a possibly-stale user directory profile. + + Args: + user_id: the remote user who no longer has a stale profile on this server. 
+ """ + await self.db_pool.simple_delete( + table="user_directory_stale_remote_users", + keyvalues={"user_id": user_id}, + desc="clear_remote_user_profile_in_user_dir_stale", + ) + + async def get_remote_servers_with_profiles_to_refresh( + self, now_ts: int, limit: int + ) -> List[str]: + """ + Get a list of up to `limit` server names which have users whose + locally-cached profiles we believe to be stale + and are refreshable given the current time `now_ts` in milliseconds. + """ + + def _get_remote_servers_with_refreshable_profiles_txn( + txn: LoggingTransaction, + ) -> List[str]: + sql = """ + SELECT user_server_name + FROM user_directory_stale_remote_users + WHERE next_try_at_ts < ? + GROUP BY user_server_name + ORDER BY MIN(next_try_at_ts), user_server_name + LIMIT ? + """ + txn.execute(sql, (now_ts, limit)) + return [row[0] for row in txn] + + return await self.db_pool.runInteraction( + "get_remote_servers_with_profiles_to_refresh", + _get_remote_servers_with_refreshable_profiles_txn, + ) + + async def get_remote_users_to_refresh_on_server( + self, server_name: str, now_ts: int, limit: int + ) -> List[Tuple[str, int, int]]: + """ + Get a list of up to `limit` user IDs from the server `server_name` + whose locally-cached profiles we believe to be stale + and are refreshable given the current time `now_ts` in milliseconds. + + Returns: + tuple of: + - User ID + - Retry counter (number of failures so far) + - Time the retry is scheduled for, in milliseconds + """ + + def _get_remote_users_to_refresh_on_server_txn( + txn: LoggingTransaction, + ) -> List[Tuple[str, int, int]]: + sql = """ + SELECT user_id, retry_counter, next_try_at_ts + FROM user_directory_stale_remote_users + WHERE user_server_name = ? AND next_try_at_ts < ? + ORDER BY next_try_at_ts + LIMIT ? + """ + txn.execute(sql, (server_name, now_ts, limit)) + return cast(List[Tuple[str, int, int]], txn.fetchall()) + + return await self.db_pool.runInteraction( + "get_remote_users_to_refresh_on_server", + _get_remote_users_to_refresh_on_server_txn, + ) + async def update_profile_in_user_dir( self, user_id: str, display_name: Optional[str], avatar_url: Optional[str] ) -> None: """ Update or add a user's profile in the user directory. + If the user is remote, the profile will be marked as not stale. """ # If the display name or avatar URL are unexpected types, replace with None. display_name = non_null_str_or_none(display_name) @@ -491,6 +597,19 @@ def _update_profile_in_user_dir_txn(txn: LoggingTransaction) -> None: values={"display_name": display_name, "avatar_url": avatar_url}, ) + if not self.hs.is_mine_id(user_id): + # Remote users: Make sure the profile is not marked as stale anymore. + self.db_pool.simple_delete_txn( + txn, + table="user_directory_stale_remote_users", + keyvalues={"user_id": user_id}, + ) + + # The display name that goes into the database index. 
+ index_display_name = display_name + if index_display_name is not None: + index_display_name = _filter_text_for_index(index_display_name) + if isinstance(self.database_engine, PostgresEngine): # We weight the localpart most highly, then display name and finally # server name @@ -508,11 +627,15 @@ def _update_profile_in_user_dir_txn(txn: LoggingTransaction) -> None: user_id, get_localpart_from_id(user_id), get_domain_from_id(user_id), - display_name, + index_display_name, ), ) elif isinstance(self.database_engine, Sqlite3Engine): - value = "%s %s" % (user_id, display_name) if display_name else user_id + value = ( + "%s %s" % (user_id, index_display_name) + if index_display_name + else user_id + ) self.db_pool.simple_upsert_txn( txn, table="user_directory_search", @@ -586,7 +709,7 @@ def _delete_all_from_user_dir_txn(txn: LoggingTransaction) -> None: ) @cached() - async def get_user_in_directory(self, user_id: str) -> Optional[Dict[str, str]]: + async def get_user_in_directory(self, user_id: str) -> Optional[Mapping[str, str]]: return await self.db_pool.simple_select_one( table="user_directory", keyvalues={"user_id": user_id}, @@ -897,6 +1020,41 @@ async def search_user_dir( return {"limited": limited, "results": results[0:limit]} +def _filter_text_for_index(text: str) -> str: + """Transforms text before it is inserted into the user directory index, or searched + for in the user directory index. + + Note that the user directory search table needs to be rebuilt whenever this function + changes. + """ + # Lowercase the text, to make searches case-insensitive. + # This is necessary for both PostgreSQL and SQLite. PostgreSQL's + # `to_tsquery/to_tsvector` functions don't lowercase non-ASCII characters when using + # the "C" collation, while SQLite just doesn't lowercase non-ASCII characters at + # all. + text = text.lower() + + # Normalize the text. NFKC normalization has two effects: + # 1. It canonicalizes the text, ie. maps all visually identical strings to the same + # string. For example, ["e", "◌́"] is mapped to ["é"]. + # 2. It maps strings that are roughly equivalent to the same string. + # For example, ["dž"] is mapped to ["d", "ž"], ["①"] to ["1"] and ["i⁹"] to + # ["i", "9"]. + text = unicodedata.normalize("NFKC", text) + + # Note that nothing is done to make searches accent-insensitive. + # That could be achieved by converting to NFKD form instead (with combining accents + # split out) and filtering out combining accents using `unicodedata.combining(c)`. + # The downside of this may be noisier search results, since search terms with + # explicit accents will match characters with no accents, or completely different + # accents. + # + # text = unicodedata.normalize("NFKD", text) + # text = "".join([c for c in text if not unicodedata.combining(c)]) + + return text + + def _parse_query_sqlite(search_term: str) -> str: """Takes a plain unicode string from the user and converts it into a form that can be passed to database. @@ -906,6 +1064,7 @@ def _parse_query_sqlite(search_term: str) -> str: We specifically add both a prefix and non prefix matching term so that exact matches get ranked higher. """ + search_term = _filter_text_for_index(search_term) # Pull out the individual words, discarding any non-word characters. results = _parse_words(search_term) @@ -918,11 +1077,21 @@ def _parse_query_postgres(search_term: str) -> Tuple[str, str, str]: We use this so that we can add prefix matching, which isn't something that is supported by default. 
""" - results = _parse_words(search_term) + search_term = _filter_text_for_index(search_term) + + escaped_words = [] + for word in _parse_words(search_term): + # Postgres tsvector and tsquery quoting rules: + # words potentially containing punctuation should be quoted + # and then existing quotes and backslashes should be doubled + # See: https://www.postgresql.org/docs/current/datatype-textsearch.html#DATATYPE-TSQUERY - both = " & ".join("(%s:* | %s)" % (result, result) for result in results) - exact = " & ".join("%s" % (result,) for result in results) - prefix = " & ".join("%s:*" % (result,) for result in results) + quoted_word = word.replace("'", "''").replace("\\", "\\\\") + escaped_words.append(f"'{quoted_word}'") + + both = " & ".join("(%s:* | %s)" % (word, word) for word in escaped_words) + exact = " & ".join("%s" % (word,) for word in escaped_words) + prefix = " & ".join("%s:*" % (word,) for word in escaped_words) return both, exact, prefix @@ -944,6 +1113,14 @@ def _parse_words(search_term: str) -> List[str]: if USE_ICU: return _parse_words_with_icu(search_term) + return _parse_words_with_regex(search_term) + + +def _parse_words_with_regex(search_term: str) -> List[str]: + """ + Break down search term into words, when we don't have ICU available. + See: `_parse_words` + """ return re.findall(r"([\w\-]+)", search_term, re.UNICODE) diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index d743282f13a3..097dea51828c 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -251,7 +251,6 @@ def _get_state_groups_from_groups_txn( class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore): - STATE_GROUP_DEDUPLICATION_UPDATE_NAME = "state_group_state_deduplication" STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index" STATE_GROUPS_ROOM_INDEX_UPDATE_NAME = "state_groups_room_id_idx" diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index 1a7232b27665..29ff64e8764a 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -18,6 +18,8 @@ import attr from synapse.api.constants import EventTypes +from synapse.events import EventBase +from synapse.events.snapshot import UnpersistedEventContext, UnpersistedEventContextBase from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( DatabasePool, @@ -257,14 +259,11 @@ async def _get_state_for_groups( member_filter, non_member_filter = state_filter.get_member_split() # Now we look them up in the member and non-member caches - ( - non_member_state, - incomplete_groups_nm, - ) = self._get_state_for_groups_using_cache( + non_member_state, incomplete_groups_nm = self._get_state_for_groups_using_cache( groups, self._state_group_cache, state_filter=non_member_filter ) - (member_state, incomplete_groups_m,) = self._get_state_for_groups_using_cache( + member_state, incomplete_groups_m = self._get_state_for_groups_using_cache( groups, self._state_group_members_cache, state_filter=member_filter ) @@ -404,6 +403,123 @@ def _insert_into_cache( fetched_keys=non_member_types, ) + async def store_state_deltas_for_batched( + self, + events_and_context: List[Tuple[EventBase, UnpersistedEventContextBase]], + room_id: str, + prev_group: int, + ) -> List[Tuple[EventBase, UnpersistedEventContext]]: + """Generate and store state deltas for a group of events and contexts created to be + batch persisted. 
Note that all the events must be in a linear chain (ie a <- b <- c). + + Args: + events_and_context: the events to generate and store a state groups for + and their associated contexts + room_id: the id of the room the events were created for + prev_group: the state group of the last event persisted before the batched events + were created + """ + + def insert_deltas_group_txn( + txn: LoggingTransaction, + events_and_context: List[Tuple[EventBase, UnpersistedEventContext]], + prev_group: int, + ) -> List[Tuple[EventBase, UnpersistedEventContext]]: + """Generate and store state groups for the provided events and contexts. + + Requires that we have the state as a delta from the last persisted state group. + + Returns: + A list of state groups + """ + is_in_db = self.db_pool.simple_select_one_onecol_txn( + txn, + table="state_groups", + keyvalues={"id": prev_group}, + retcol="id", + allow_none=True, + ) + if not is_in_db: + raise Exception( + "Trying to persist state with unpersisted prev_group: %r" + % (prev_group,) + ) + + num_state_groups = sum( + 1 for event, _ in events_and_context if event.is_state() + ) + + state_groups = self._state_group_seq_gen.get_next_mult_txn( + txn, num_state_groups + ) + + sg_before = prev_group + state_group_iter = iter(state_groups) + for event, context in events_and_context: + if not event.is_state(): + context.state_group_after_event = sg_before + context.state_group_before_event = sg_before + continue + + sg_after = next(state_group_iter) + context.state_group_after_event = sg_after + context.state_group_before_event = sg_before + context.state_delta_due_to_event = { + (event.type, event.state_key): event.event_id + } + sg_before = sg_after + + self.db_pool.simple_insert_many_txn( + txn, + table="state_groups", + keys=("id", "room_id", "event_id"), + values=[ + (context.state_group_after_event, room_id, event.event_id) + for event, context in events_and_context + if event.is_state() + ], + ) + + self.db_pool.simple_insert_many_txn( + txn, + table="state_group_edges", + keys=("state_group", "prev_state_group"), + values=[ + ( + context.state_group_after_event, + context.state_group_before_event, + ) + for event, context in events_and_context + if event.is_state() + ], + ) + + self.db_pool.simple_insert_many_txn( + txn, + table="state_groups_state", + keys=("state_group", "room_id", "type", "state_key", "event_id"), + values=[ + ( + context.state_group_after_event, + room_id, + key[0], + key[1], + state_id, + ) + for event, context in events_and_context + if context.state_delta_due_to_event is not None + for key, state_id in context.state_delta_due_to_event.items() + ], + ) + return events_and_context + + return await self.db_pool.runInteraction( + "store_state_deltas_for_batched.insert_deltas_group", + insert_deltas_group_txn, + events_and_context, + prev_group, + ) + async def store_state_group( self, event_id: str, @@ -689,12 +805,14 @@ async def purge_room_state( state_groups_to_delete: State groups to delete """ + logger.info("[purge] Starting state purge") await self.db_pool.runInteraction( "purge_room_state", self._purge_room_state_txn, room_id, state_groups_to_delete, ) + logger.info("[purge] Done with state purge") def _purge_room_state_txn( self, diff --git a/synapse/storage/engines/__init__.py b/synapse/storage/engines/__init__.py index a182e8a098b1..d1ccb7390a02 100644 --- a/synapse/storage/engines/__init__.py +++ b/synapse/storage/engines/__init__.py @@ -25,7 +25,7 @@ except ImportError: class PostgresEngine(BaseDatabaseEngine): # type: 
ignore[no-redef] - def __new__(cls, *args: object, **kwargs: object) -> NoReturn: # type: ignore[misc] + def __new__(cls, *args: object, **kwargs: object) -> NoReturn: raise RuntimeError( f"Cannot create {cls.__name__} -- psycopg2 module is not installed" ) @@ -36,7 +36,7 @@ def __new__(cls, *args: object, **kwargs: object) -> NoReturn: # type: ignore[m except ImportError: class Sqlite3Engine(BaseDatabaseEngine): # type: ignore[no-redef] - def __new__(cls, *args: object, **kwargs: object) -> NoReturn: # type: ignore[misc] + def __new__(cls, *args: object, **kwargs: object) -> NoReturn: raise RuntimeError( f"Cannot create {cls.__name__} -- sqlite3 module is not installed" ) diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index 3acdb39da75c..2a1c6fa31bc2 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -23,7 +23,7 @@ from synapse.config.homeserver import HomeServerConfig from synapse.storage.database import LoggingDatabaseConnection -from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine from synapse.storage.schema import SCHEMA_COMPAT_VERSION, SCHEMA_VERSION from synapse.storage.types import Cursor @@ -108,9 +108,14 @@ def prepare_database( # so we start one before running anything. This ensures that any upgrades # are either applied completely, or not at all. # - # (psycopg2 automatically starts a transaction as soon as we run any statements - # at all, so this is redundant but harmless there.) - cur.execute("BEGIN TRANSACTION") + # psycopg2 does not automatically start transactions when in autocommit mode. + # While it is technically harmless to nest transactions in postgres, doing so + # results in a warning in Postgres' logs per query. And we'd rather like to + # avoid doing that. + if isinstance(database_engine, Sqlite3Engine) or ( + isinstance(database_engine, PostgresEngine) and db_conn.autocommit + ): + cur.execute("BEGIN TRANSACTION") logger.info("%r: Checking existing schema version", databases) version_info = _get_or_create_schema_state(cur, database_engine) @@ -558,7 +563,7 @@ def _apply_module_schemas( """ # This is the old way for password_auth_provider modules to make changes # to the database. This should instead be done using the module API - for (mod, _config) in config.authproviders.password_providers: + for mod, _config in config.authproviders.password_providers: if not hasattr(mod, "get_db_schema_files"): continue modname = ".".join((mod.__module__, mod.__name__)) @@ -586,7 +591,7 @@ def _apply_module_schema_files( (modname,), ) applied_deltas = {d for d, in cur} - for (name, stream) in names_and_streams: + for name, stream in names_and_streams: if name in applied_deltas: continue diff --git a/synapse/storage/schema/main/delta/74/01_user_directory_stale_remote_users.sql b/synapse/storage/schema/main/delta/74/01_user_directory_stale_remote_users.sql new file mode 100644 index 000000000000..dcb38f3d7b68 --- /dev/null +++ b/synapse/storage/schema/main/delta/74/01_user_directory_stale_remote_users.sql @@ -0,0 +1,39 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Table containing a list of remote users whose profiles may have changed +-- since their last update in the user directory. +CREATE TABLE user_directory_stale_remote_users ( + -- The User ID of the remote user whose profile may be stale. + user_id TEXT NOT NULL PRIMARY KEY, + + -- The server name of the user. + user_server_name TEXT NOT NULL, + + -- The timestamp (in ms) after which we should next try to request the user's + -- latest profile. + next_try_at_ts BIGINT NOT NULL, + + -- The number of retries so far. + -- 0 means we have not yet attempted to refresh the profile. + -- Used for calculating exponential backoff. + retry_counter INTEGER NOT NULL +); + +-- Create an index so we can easily query upcoming servers to try. +CREATE INDEX user_directory_stale_remote_users_next_try_idx ON user_directory_stale_remote_users(next_try_at_ts, user_server_name); + +-- Create an index so we can easily query upcoming users to try for a particular server. +CREATE INDEX user_directory_stale_remote_users_next_try_by_server_idx ON user_directory_stale_remote_users(user_server_name, next_try_at_ts); diff --git a/synapse/storage/schema/main/delta/74/90COMMENTS_destinations.sql.postgres b/synapse/storage/schema/main/delta/74/90COMMENTS_destinations.sql.postgres new file mode 100644 index 000000000000..cc7dda1a1136 --- /dev/null +++ b/synapse/storage/schema/main/delta/74/90COMMENTS_destinations.sql.postgres @@ -0,0 +1,52 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +--- destinations +COMMENT ON TABLE destinations IS + 'Information about remote homeservers and the health of our connection to them.'; + +COMMENT ON COLUMN destinations.destination IS 'server name of remote homeserver in question'; + +COMMENT ON COLUMN destinations.last_successful_stream_ordering IS +$$Stream ordering of the most recently successfully sent PDU to this server, sent through normal send (not e.g. backfill). +In Catch-Up Mode, the original PDU persisted by us is represented here, even if we sent a later forward extremity in its stead. +See `destination_rooms` for more information about catch-up.$$; + +COMMENT ON COLUMN destinations.retry_last_ts IS +$$The last time we tried and failed to reach the remote server, in ms. +This field is reset to `0` when we succeed in connecting again.$$; + +COMMENT ON COLUMN destinations.retry_interval IS +$$How long, in milliseconds, to wait since the last time we tried to reach the remote server before trying again. 
+This field is reset to `0` when we succeed in connecting again.$$; + +COMMENT ON COLUMN destinations.failure_ts IS +$$The first time we tried and failed to reach the remote server, in ms. +This field is reset to `NULL` when we succeed in connecting again.$$; + + + +--- destination_rooms +COMMENT ON TABLE destination_rooms IS + 'Information about transmission of PDUs in a given room to a given remote homeserver.'; + +COMMENT ON COLUMN destination_rooms.destination IS 'server name of remote homeserver in question'; + +COMMENT ON COLUMN destination_rooms.room_id IS 'room ID in question'; + +COMMENT ON COLUMN destination_rooms.stream_ordering IS +$$`stream_ordering` of the most recent PDU in this room that needs to be sent (by us) to this homeserver. +This can only be pointing to our own PDU because we are only responsible for sending our own PDUs.$$; diff --git a/synapse/storage/types.py b/synapse/storage/types.py index 0031df1e0649..56a00485393d 100644 --- a/synapse/storage/types.py +++ b/synapse/storage/types.py @@ -12,7 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. from types import TracebackType -from typing import Any, Iterator, List, Mapping, Optional, Sequence, Tuple, Type, Union +from typing import ( + Any, + Callable, + Iterator, + List, + Mapping, + Optional, + Sequence, + Tuple, + Type, + Union, +) from typing_extensions import Protocol @@ -112,15 +123,35 @@ class DBAPI2Module(Protocol): # extends from this hierarchy. See # https://docs.python.org/3/library/sqlite3.html?highlight=sqlite3#exceptions # https://www.postgresql.org/docs/current/errcodes-appendix.html#ERRCODES-TABLE - Warning: Type[Exception] - Error: Type[Exception] + # + # Note: rather than + # x: T + # we write + # @property + # def x(self) -> T: ... + # which expresses that the protocol attribute `x` is read-only. The mypy docs + # https://mypy.readthedocs.io/en/latest/common_issues.html#covariant-subtyping-of-mutable-protocol-members-is-rejected + # explain why this is necessary for safety. TL;DR: we shouldn't be able to write + # to `x`, only read from it. See also https://github.com/python/mypy/issues/6002 . + @property + def Warning(self) -> Type[Exception]: + ... + + @property + def Error(self) -> Type[Exception]: + ... # Errors are divided into `InterfaceError`s (something went wrong in the database # driver) and `DatabaseError`s (something went wrong in the database). These are # both subclasses of `Error`, but we can't currently express this in type # annotations due to https://github.com/python/mypy/issues/8397 - InterfaceError: Type[Exception] - DatabaseError: Type[Exception] + @property + def InterfaceError(self) -> Type[Exception]: + ... + + @property + def DatabaseError(self) -> Type[Exception]: + ... # Everything below is a subclass of `DatabaseError`. @@ -128,7 +159,9 @@ class DBAPI2Module(Protocol): # - An integer was too big for its data type. # - An invalid date time was provided. # - A string contained a null code point. - DataError: Type[Exception] + @property + def DataError(self) -> Type[Exception]: + ... # Roughly: something went wrong in the database, but it's not within the application # programmer's control. Examples: @@ -138,28 +171,45 @@ class DBAPI2Module(Protocol): # - A serialisation failure occurred. # - The database ran out of resources, such as storage, memory, connections, etc. # - The database encountered an error from the operating system. 
- OperationalError: Type[Exception] + @property + def OperationalError(self) -> Type[Exception]: + ... # Roughly: we've given the database data which breaks a rule we asked it to enforce. # Examples: # - Stop, criminal scum! You violated the foreign key constraint # - Also check constraints, non-null constraints, etc. - IntegrityError: Type[Exception] + @property + def IntegrityError(self) -> Type[Exception]: + ... # Roughly: something went wrong within the database server itself. - InternalError: Type[Exception] + @property + def InternalError(self) -> Type[Exception]: + ... # Roughly: the application did something silly that needs to be fixed. Examples: # - We don't have permissions to do something. # - We tried to create a table with duplicate column names. # - We tried to use a reserved name. # - We referred to a column that doesn't exist. - ProgrammingError: Type[Exception] + @property + def ProgrammingError(self) -> Type[Exception]: + ... # Roughly: we've tried to do something that this database doesn't support. - NotSupportedError: Type[Exception] + @property + def NotSupportedError(self) -> Type[Exception]: + ... - def connect(self, **parameters: object) -> Connection: + # We originally wrote + # def connect(self, *args, **kwargs) -> Connection: ... + # But mypy doesn't seem to like that because sqlite3.connect takes a mandatory + # positional argument. We can't make that part of the signature though, because + # psycopg2.connect doesn't have a mandatory positional argument. Instead, we use + # the following slightly unusual workaround. + @property + def connect(self) -> Callable[..., Connection]: ... diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 9adff3f4f523..d2c874b9a8d7 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -93,8 +93,11 @@ def _load_current_id( return res -class AbstractStreamIdTracker(metaclass=abc.ABCMeta): - """Tracks the "current" stream ID of a stream that may have multiple writers. +class AbstractStreamIdGenerator(metaclass=abc.ABCMeta): + """Generates or tracks stream IDs for a stream that may have multiple writers. + + Each stream ID represents a write transaction, whose completion is tracked + so that the "current" stream ID of the stream can be determined. Stream IDs are monotonically increasing or decreasing integers representing write transactions. The "current" stream ID is the stream ID such that all transactions @@ -130,16 +133,6 @@ def get_current_token_for_writer(self, instance_name: str) -> int: """ raise NotImplementedError() - -class AbstractStreamIdGenerator(AbstractStreamIdTracker): - """Generates stream IDs for a stream that may have multiple writers. - - Each stream ID represents a write transaction, whose completion is tracked - so that the "current" stream ID of the stream can be determined. - - See `AbstractStreamIdTracker` for more details. - """ - @abc.abstractmethod def get_next(self) -> AsyncContextManager[int]: """ @@ -158,6 +151,15 @@ def get_next_mult(self, n: int) -> AsyncContextManager[Sequence[int]]: """ raise NotImplementedError() + @abc.abstractmethod + def get_next_txn(self, txn: LoggingTransaction) -> int: + """ + Usage: + stream_id_gen.get_next_txn(txn) + # ... persist events ... + """ + raise NotImplementedError() + class StreamIdGenerator(AbstractStreamIdGenerator): """Generates and tracks stream IDs for a stream with a single writer. 
@@ -263,6 +265,40 @@ def manager() -> Generator[Sequence[int], None, None]: return _AsyncCtxManagerWrapper(manager()) + def get_next_txn(self, txn: LoggingTransaction) -> int: + """ + Retrieve the next stream ID from within a database transaction. + + Clean-up functions will be called when the transaction finishes. + + Args: + txn: The database transaction object. + + Returns: + The next stream ID. + """ + if not self._is_writer: + raise Exception("Tried to allocate stream ID on non-writer") + + # Get the next stream ID. + with self._lock: + self._current += self._step + next_id = self._current + + self._unfinished_ids[next_id] = next_id + + def clear_unfinished_id(id_to_clear: int) -> None: + """A function to mark processing this ID as finished""" + with self._lock: + self._unfinished_ids.pop(id_to_clear) + + # Mark this ID as finished once the database transaction itself finishes. + txn.call_after(clear_unfinished_id, next_id) + txn.call_on_exception(clear_unfinished_id, next_id) + + # Return the new ID. + return next_id + def get_current_token(self) -> int: if not self._is_writer: return self._current @@ -568,7 +604,7 @@ def get_next_txn(self, txn: LoggingTransaction) -> int: """ Usage: - stream_id = stream_id_gen.get_next(txn) + stream_id = stream_id_gen.get_next_txn(txn) # ... persist event ... """ diff --git a/synapse/storage/util/sequence.py b/synapse/storage/util/sequence.py index 75268cbe1595..80915216de94 100644 --- a/synapse/storage/util/sequence.py +++ b/synapse/storage/util/sequence.py @@ -205,7 +205,7 @@ def __init__(self, get_first_callback: GetFirstCallbackType): """ Args: get_first_callback: a callback which is called on the first call to - get_next_id_txn; should return the curreent maximum id + get_next_id_txn; should return the current maximum id """ # the callback. this is cleared after it is called, so that it can be GCed. self._callback: Optional[GetFirstCallbackType] = get_first_callback diff --git a/synapse/streams/__init__.py b/synapse/streams/__init__.py index c6c8a0315c9b..8a48ffc48ddf 100644 --- a/synapse/streams/__init__.py +++ b/synapse/streams/__init__.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +from abc import ABC, abstractmethod from typing import Generic, List, Optional, Tuple, TypeVar from synapse.types import StrCollection, UserID @@ -22,7 +22,8 @@ R = TypeVar("R") -class EventSource(Generic[K, R]): +class EventSource(ABC, Generic[K, R]): + @abstractmethod async def get_new_events( self, user: UserID, @@ -32,4 +33,4 @@ async def get_new_events( is_guest: bool, explicit_room_id: Optional[str] = None, ) -> Tuple[List[R], K]: - ... + raise NotImplementedError() diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index f82d1cfc298b..33363867c456 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -69,6 +69,9 @@ MutableStateMap = MutableMapping[StateKey, T] # JSON types. These could be made stronger, but will do for now. +# A "simple" (canonical) JSON value. +SimpleJsonValue = Optional[Union[str, int, bool]] +JsonValue = Union[List[SimpleJsonValue], Tuple[SimpleJsonValue, ...], SimpleJsonValue] # A JSON-serialisable dict. JsonDict = Dict[str, Any] # A JSON-serialisable mapping; roughly speaking an immutable JSONDict. 
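As an aside on the `SimpleJsonValue`/`JsonValue` aliases introduced in `synapse/types/__init__.py` above: the following is a minimal, purely illustrative sketch (not part of this diff) of how such aliases describe a canonical JSON leaf value, with a hypothetical `is_json_value` helper that checks the same shape at runtime.

```python
from typing import List, Optional, Tuple, Union

# Illustrative copies of the aliases added above (assumption: same definitions).
SimpleJsonValue = Optional[Union[str, int, bool]]
JsonValue = Union[List[SimpleJsonValue], Tuple[SimpleJsonValue, ...], SimpleJsonValue]


def is_json_value(value: object) -> bool:
    """Return True if `value` matches the JsonValue shape: a simple value
    (str, int, bool or None), or a list/tuple of such simple values."""
    if value is None or isinstance(value, (str, bool, int)):
        return True
    if isinstance(value, (list, tuple)):
        return all(v is None or isinstance(v, (str, bool, int)) for v in value)
    return False


# Example: a flat list of user IDs is a valid JsonValue; nested lists are not.
assert is_json_value(["@alice:example.org", "@bob:example.org"])
assert not is_json_value([["nested"]])
```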
diff --git a/synapse/types/state.py b/synapse/types/state.py index 743a4f9217ee..4b3071acce32 100644 --- a/synapse/types/state.py +++ b/synapse/types/state.py @@ -120,7 +120,7 @@ def from_types(types: Iterable[Tuple[str, Optional[str]]]) -> "StateFilter": def to_types(self) -> Iterable[Tuple[str, Optional[str]]]: """The inverse to `from_types`.""" - for (event_type, state_keys) in self.types.items(): + for event_type, state_keys in self.types.items(): if state_keys is None: yield event_type, None else: diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 9387632d0d74..6ffa56217eb9 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -98,7 +98,6 @@ class EvictionReason(Enum): @attr.s(slots=True, auto_attribs=True) class CacheMetric: - _cache: Sized _cache_type: str _cache_name: str diff --git a/synapse/util/check_dependencies.py b/synapse/util/check_dependencies.py index 3b1e20570020..1c0fde4966e7 100644 --- a/synapse/util/check_dependencies.py +++ b/synapse/util/check_dependencies.py @@ -183,7 +183,7 @@ def check_requirements(extra: Optional[str] = None) -> None: deps_unfulfilled = [] errors = [] - for (requirement, must_be_installed) in dependencies: + for requirement, must_be_installed in dependencies: try: dist: metadata.Distribution = metadata.distribution(requirement.name) except metadata.PackageNotFoundError: diff --git a/synapse/util/patch_inline_callbacks.py b/synapse/util/patch_inline_callbacks.py index f97f98a05742..d00d34e652a0 100644 --- a/synapse/util/patch_inline_callbacks.py +++ b/synapse/util/patch_inline_callbacks.py @@ -211,7 +211,6 @@ def check_yield_points_inner( result = Failure() if current_context() != expected_context: - # This happens because the context is lost sometime *after* the # previous yield and *after* the current yield. E.g. the # deferred we waited on didn't follow the rules, or we forgot to diff --git a/synapse/visibility.py b/synapse/visibility.py index e442de31739e..468e22f8f64e 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -14,7 +14,17 @@ # limitations under the License. import logging from enum import Enum, auto -from typing import Collection, Dict, FrozenSet, List, Optional, Tuple +from typing import ( + Collection, + Dict, + FrozenSet, + List, + Mapping, + Optional, + Sequence, + Set, + Tuple, +) import attr from typing_extensions import Final @@ -565,29 +575,43 @@ async def filter_events_for_server( storage: StorageControllers, target_server_name: str, local_server_name: str, - events: List[EventBase], - redact: bool = True, - check_history_visibility_only: bool = False, + events: Sequence[EventBase], + *, + redact: bool, + filter_out_erased_senders: bool, + filter_out_remote_partial_state_events: bool, ) -> List[EventBase]: - """Filter a list of events based on whether given server is allowed to + """Filter a list of events based on whether the target server is allowed to see them. + For a fully stated room, the target server is allowed to see an event E if: + - the state at E has world readable or shared history vis, OR + - the state at E says that the target server is in the room. + + For a partially stated room, the target server is allowed to see E if: + - E was created by this homeserver, AND: + - the partial state at E has world readable or shared history vis, OR + - the partial state at E says that the target server is in the room. + + TODO: state before or state after? 
+ Args: storage - server_name + target_server_name + local_server_name events - redact: Whether to return a redacted version of the event, or - to filter them out entirely. - check_history_visibility_only: Whether to only check the - history visibility, rather than things like if the sender has been + redact: Controls what to do with events which have been filtered out. + If True, include their redacted forms; if False, omit them entirely. + filter_out_erased_senders: If true, also filter out events whose sender has been erased. This is used e.g. during pagination to decide whether to backfill or not. - + filter_out_remote_partial_state_events: If True, also filter out events in + partial state rooms created by other homeservers. Returns The filtered events. """ - def is_sender_erased(event: EventBase, erased_senders: Dict[str, bool]) -> bool: + def is_sender_erased(event: EventBase, erased_senders: Mapping[str, bool]) -> bool: if erased_senders and erased_senders[event.sender]: logger.info("Sender of %s has been erased, redacting", event.event_id) return True @@ -616,7 +640,7 @@ def check_event_is_visible( # server has no users in the room: redact return False - if not check_history_visibility_only: + if filter_out_erased_senders: erased_senders = await storage.main.are_users_erased(e.sender for e in events) else: # We don't want to check whether users are erased, which is equivalent @@ -631,15 +655,15 @@ def check_event_is_visible( # otherwise a room could be fully joined after we retrieve those, which would then bypass # this check but would base the filtering on an outdated view of the membership events. - partial_state_invisible_events = set() - if not check_history_visibility_only: + partial_state_invisible_event_ids: Set[str] = set() + if filter_out_remote_partial_state_events: for e in events: sender_domain = get_domain_from_id(e.sender) if ( sender_domain != local_server_name and await storage.main.is_partial_state_room(e.room_id) ): - partial_state_invisible_events.add(e) + partial_state_invisible_event_ids.add(e.event_id) # Let's check to see if all the events have a history visibility # of "shared" or "world_readable". 
If that's the case then we don't @@ -658,17 +682,20 @@ def check_event_is_visible( target_server_name, ) - to_return = [] - for e in events: + def include_event_in_output(e: EventBase) -> bool: erased = is_sender_erased(e, erased_senders) visible = check_event_is_visible( event_to_history_vis[e.event_id], event_to_memberships.get(e.event_id, {}) ) - if e in partial_state_invisible_events: + if e.event_id in partial_state_invisible_event_ids: visible = False - if visible and not erased: + return visible and not erased + + to_return = [] + for e in events: + if include_event_in_output(e): to_return.append(e) elif redact: to_return.append(prune_event(e)) diff --git a/synmark/__main__.py b/synmark/__main__.py index 35a59e347a46..19de6391877f 100644 --- a/synmark/__main__.py +++ b/synmark/__main__.py @@ -34,12 +34,10 @@ def make_test(main): """ def _main(loops): - reactor = make_reactor() file_out = StringIO() with redirect_stderr(file_out): - d = Deferred() d.addCallback(lambda _: ensureDeferred(main(reactor, loops))) diff --git a/synmark/suites/logging.py b/synmark/suites/logging.py index 9419892e957c..8beb077e0a33 100644 --- a/synmark/suites/logging.py +++ b/synmark/suites/logging.py @@ -30,7 +30,6 @@ class LineCounter(LineOnlyReceiver): - delimiter = b"\n" def __init__(self, *args, **kwargs): diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py index 5d89ba94ad8f..2ee343d8a4ad 100644 --- a/tests/app/test_openid_listener.py +++ b/tests/app/test_openid_listener.py @@ -67,7 +67,9 @@ def test_openid_listener(self, names: List[str], expectation: str) -> None: } # Listen with the config - self.hs._listen_http(parse_listener_def(0, config)) + hs = self.hs + assert isinstance(hs, GenericWorkerServer) + hs._listen_http(parse_listener_def(0, config)) # Grab the resource from the site that was told to listen site = self.reactor.tcpServers[0][1] @@ -115,7 +117,9 @@ def test_openid_listener(self, names: List[str], expectation: str) -> None: } # Listen with the config - self.hs._listener_http(self.hs.config, parse_listener_def(0, config)) + hs = self.hs + assert isinstance(hs, SynapseHomeServer) + hs._listener_http(self.hs.config, parse_listener_def(0, config)) # Grab the resource from the site that was told to listen site = self.reactor.tcpServers[0][1] diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py index febcc1499d06..e2a3bad065da 100644 --- a/tests/appservice/test_scheduler.py +++ b/tests/appservice/test_scheduler.py @@ -11,12 +11,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import TYPE_CHECKING, List, Optional, Sequence, Tuple, cast +from typing import List, Optional, Sequence, Tuple, cast from unittest.mock import Mock from typing_extensions import TypeAlias from twisted.internet import defer +from twisted.test.proto_helpers import MemoryReactor from synapse.appservice import ( ApplicationService, @@ -40,9 +41,6 @@ from ..utils import MockClock -if TYPE_CHECKING: - from twisted.internet.testing import MemoryReactor - class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase): def setUp(self) -> None: diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 0e8af2da54b5..1b9696748fdc 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -192,7 +192,7 @@ def test_verify_json_for_server(self) -> None: key1 = signedjson.key.generate_signing_key("1") r = self.hs.get_datastores().main.store_server_verify_keys( "server9", - time.time() * 1000, + int(time.time() * 1000), [("server9", get_key_id(key1), FetchKeyResult(get_verify_key(key1), 1000))], ) self.get_success(r) @@ -287,7 +287,7 @@ def test_verify_json_for_server_with_null_valid_until_ms(self) -> None: key1 = signedjson.key.generate_signing_key("1") r = self.hs.get_datastores().main.store_server_verify_keys( "server9", - time.time() * 1000, + int(time.time() * 1000), # None is not a valid value in FetchKeyResult, but we're abusing this # API to insert null values into the database. The nulls get converted # to 0 when fetched in KeyStore.get_server_verify_keys. @@ -466,9 +466,9 @@ async def get_json(destination: str, path: str, **kwargs: Any) -> JsonDict: key_json = self.get_success( self.hs.get_datastores().main.get_server_keys_json([lookup_triplet]) ) - res = key_json[lookup_triplet] - self.assertEqual(len(res), 1) - res = res[0] + res_keys = key_json[lookup_triplet] + self.assertEqual(len(res_keys), 1) + res = res_keys[0] self.assertEqual(res["key_id"], testverifykey_id) self.assertEqual(res["from_server"], SERVER_NAME) self.assertEqual(res["ts_added_ms"], self.reactor.seconds() * 1000) @@ -584,9 +584,9 @@ def test_get_keys_from_perspectives(self) -> None: key_json = self.get_success( self.hs.get_datastores().main.get_server_keys_json([lookup_triplet]) ) - res = key_json[lookup_triplet] - self.assertEqual(len(res), 1) - res = res[0] + res_keys = key_json[lookup_triplet] + self.assertEqual(len(res_keys), 1) + res = res_keys[0] self.assertEqual(res["key_id"], testverifykey_id) self.assertEqual(res["from_server"], self.mock_perspective_server.server_name) self.assertEqual(res["ts_added_ms"], self.reactor.seconds() * 1000) @@ -705,9 +705,9 @@ def test_get_perspectives_own_key(self) -> None: key_json = self.get_success( self.hs.get_datastores().main.get_server_keys_json([lookup_triplet]) ) - res = key_json[lookup_triplet] - self.assertEqual(len(res), 1) - res = res[0] + res_keys = key_json[lookup_triplet] + self.assertEqual(len(res_keys), 1) + res = res_keys[0] self.assertEqual(res["key_id"], testverifykey_id) self.assertEqual(res["from_server"], self.mock_perspective_server.server_name) self.assertEqual(res["ts_added_ms"], self.reactor.seconds() * 1000) diff --git a/tests/events/test_presence_router.py b/tests/events/test_presence_router.py index a9893def7422..6fb1f1bd6e31 100644 --- a/tests/events/test_presence_router.py +++ b/tests/events/test_presence_router.py @@ -31,7 +31,11 @@ from tests.handlers.test_sync import generate_sync_config from tests.test_utils import simple_async_mock -from tests.unittest import 
FederatingHomeserverTestCase, override_config +from tests.unittest import ( + FederatingHomeserverTestCase, + HomeserverTestCase, + override_config, +) @attr.s @@ -152,11 +156,11 @@ class PresenceRouterTestCase(FederatingHomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: # Mock out the calls over federation. - fed_transport_client = Mock(spec=["send_transaction"]) - fed_transport_client.send_transaction = simple_async_mock({}) + self.fed_transport_client = Mock(spec=["send_transaction"]) + self.fed_transport_client.send_transaction = simple_async_mock({}) hs = self.setup_test_homeserver( - federation_transport_client=fed_transport_client, + federation_transport_client=self.fed_transport_client, ) load_legacy_presence_router(hs) @@ -418,7 +422,7 @@ def send_local_online_presence_to_with_module_test_body(self) -> None: # # Thus we reset the mock, and try sending all online local user # presence again - self.hs.get_federation_transport_client().send_transaction.reset_mock() + self.fed_transport_client.send_transaction.reset_mock() # Broadcast local user online presence self.get_success( @@ -443,9 +447,7 @@ def send_local_online_presence_to_with_module_test_body(self) -> None: } found_users = set() - calls = ( - self.hs.get_federation_transport_client().send_transaction.call_args_list - ) + calls = self.fed_transport_client.send_transaction.call_args_list for call in calls: call_args = call[0] federation_transaction: Transaction = call_args[0] @@ -470,7 +472,7 @@ def send_local_online_presence_to_with_module_test_body(self) -> None: def send_presence_update( - testcase: FederatingHomeserverTestCase, + testcase: HomeserverTestCase, user_id: str, access_token: str, presence_state: str, @@ -491,7 +493,7 @@ def send_presence_update( def sync_presence( - testcase: FederatingHomeserverTestCase, + testcase: HomeserverTestCase, user_id: str, since_token: Optional[StreamToken] = None, ) -> Tuple[List[UserPresenceState], StreamToken]: diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py index d667dd27bf40..33af8770fde8 100644 --- a/tests/federation/test_complexity.py +++ b/tests/federation/test_complexity.py @@ -17,14 +17,13 @@ from synapse.api.errors import Codes, SynapseError from synapse.rest import admin from synapse.rest.client import login, room -from synapse.types import JsonDict, UserID +from synapse.types import JsonDict, UserID, create_requester from tests import unittest from tests.test_utils import make_awaitable class RoomComplexityTests(unittest.FederatingHomeserverTestCase): - servlets = [ admin.register_servlets, room.register_servlets, @@ -37,7 +36,6 @@ def default_config(self) -> JsonDict: return config def test_complexity_simple(self) -> None: - u1 = self.register_user("u1", "pass") u1_token = self.login("u1", "pass") @@ -56,7 +54,11 @@ def test_complexity_simple(self) -> None: # Artificially raise the complexity store = self.hs.get_datastores().main - store.get_current_state_event_counts = lambda x: make_awaitable(500 * 1.23) + + async def get_current_state_event_counts(room_id: str) -> int: + return int(500 * 1.23) + + store.get_current_state_event_counts = get_current_state_event_counts # type: ignore[assignment] # Get the room complexity again -- make sure it's our artificial value channel = self.make_signed_federation_request( @@ -67,7 +69,6 @@ def test_complexity_simple(self) -> None: self.assertEqual(complexity, 1.23) def test_join_too_large(self) -> None: - u1 = self.register_user("u1", "pass") 
handler = self.hs.get_room_member_handler() @@ -75,12 +76,12 @@ def test_join_too_large(self) -> None: # Mock out some things, because we don't want to test the whole join fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) - handler.federation_handler.do_invite_join = Mock( + handler.federation_handler.do_invite_join = Mock( # type: ignore[assignment] return_value=make_awaitable(("", 1)) ) d = handler._remote_join( - None, + create_requester(u1), ["other.example.com"], "roomid", UserID.from_string(u1), @@ -106,12 +107,12 @@ def test_join_too_large_admin(self) -> None: # Mock out some things, because we don't want to test the whole join fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) - handler.federation_handler.do_invite_join = Mock( + handler.federation_handler.do_invite_join = Mock( # type: ignore[assignment] return_value=make_awaitable(("", 1)) ) d = handler._remote_join( - None, + create_requester(u1), ["other.example.com"], "roomid", UserID.from_string(u1), @@ -127,7 +128,6 @@ def test_join_too_large_admin(self) -> None: self.assertEqual(f.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) def test_join_too_large_once_joined(self) -> None: - u1 = self.register_user("u1", "pass") u1_token = self.login("u1", "pass") @@ -144,17 +144,18 @@ def test_join_too_large_once_joined(self) -> None: # Mock out some things, because we don't want to test the whole join fed_transport.client.get_json = Mock(return_value=make_awaitable(None)) - handler.federation_handler.do_invite_join = Mock( + handler.federation_handler.do_invite_join = Mock( # type: ignore[assignment] return_value=make_awaitable(("", 1)) ) # Artificially raise the complexity - self.hs.get_datastores().main.get_current_state_event_counts = ( - lambda x: make_awaitable(600) - ) + async def get_current_state_event_counts(room_id: str) -> int: + return 600 + + self.hs.get_datastores().main.get_current_state_event_counts = get_current_state_event_counts # type: ignore[assignment] d = handler._remote_join( - None, + create_requester(u1), ["other.example.com"], room_1, UserID.from_string(u1), @@ -200,12 +201,12 @@ def test_join_too_large_no_admin(self) -> None: # Mock out some things, because we don't want to test the whole join fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) - handler.federation_handler.do_invite_join = Mock( + handler.federation_handler.do_invite_join = Mock( # type: ignore[assignment] return_value=make_awaitable(("", 1)) ) d = handler._remote_join( - None, + create_requester(u1), ["other.example.com"], "roomid", UserID.from_string(u1), @@ -230,12 +231,12 @@ def test_join_too_large_admin(self) -> None: # Mock out some things, because we don't want to test the whole join fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) - handler.federation_handler.do_invite_join = Mock( + handler.federation_handler.do_invite_join = Mock( # type: ignore[assignment] return_value=make_awaitable(("", 1)) ) d = handler._remote_join( - None, + create_requester(u1), ["other.example.com"], "roomid", UserID.from_string(u1), diff --git a/tests/federation/test_federation_catch_up.py b/tests/federation/test_federation_catch_up.py index a986b15f0a87..391ae5170729 100644 --- a/tests/federation/test_federation_catch_up.py +++ b/tests/federation/test_federation_catch_up.py @@ -1,11 +1,16 @@ -from typing import Callable, List, Optional, Tuple +from typing import Callable, Collection, List, Optional, Tuple +from unittest import mock from 
unittest.mock import Mock from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import EventTypes from synapse.events import EventBase -from synapse.federation.sender import PerDestinationQueue, TransactionManager +from synapse.federation.sender import ( + FederationSender, + PerDestinationQueue, + TransactionManager, +) from synapse.federation.units import Edu, Transaction from synapse.rest import admin from synapse.rest.client import login, room @@ -33,8 +38,9 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase): ] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: + self.federation_transport_client = Mock(spec=["send_transaction"]) return self.setup_test_homeserver( - federation_transport_client=Mock(spec=["send_transaction"]), + federation_transport_client=self.federation_transport_client, ) def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: @@ -52,10 +58,14 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.pdus: List[JsonDict] = [] self.failed_pdus: List[JsonDict] = [] self.is_online = True - self.hs.get_federation_transport_client().send_transaction.side_effect = ( + self.federation_transport_client.send_transaction.side_effect = ( self.record_transaction ) + federation_sender = hs.get_federation_sender() + assert isinstance(federation_sender, FederationSender) + self.federation_sender = federation_sender + def default_config(self) -> JsonDict: config = super().default_config() config["federation_sender_instances"] = None @@ -229,11 +239,11 @@ def test_catch_up_from_blank_state(self) -> None: # let's delete the federation transmission queue # (this pretends we are starting up fresh.) self.assertFalse( - self.hs.get_federation_sender() - ._per_destination_queues["host2"] - .transmission_loop_running + self.federation_sender._per_destination_queues[ + "host2" + ].transmission_loop_running ) - del self.hs.get_federation_sender()._per_destination_queues["host2"] + del self.federation_sender._per_destination_queues["host2"] # let's also clear any backoffs self.get_success( @@ -322,6 +332,7 @@ def test_catch_up_loop(self) -> None: # also fetch event 5 so we know its last_successful_stream_ordering later event_5 = self.get_success(self.hs.get_datastores().main.get_event(event_id_5)) + assert event_2.internal_metadata.stream_ordering is not None self.get_success( self.hs.get_datastores().main.set_destination_last_successful_stream_ordering( "host2", event_2.internal_metadata.stream_ordering @@ -425,15 +436,16 @@ def test_catch_up_on_synapse_startup(self) -> None: def wake_destination_track(destination: str) -> None: woken.append(destination) - self.hs.get_federation_sender().wake_destination = wake_destination_track + self.federation_sender.wake_destination = wake_destination_track # type: ignore[assignment] # cancel the pre-existing timer for _wake_destinations_needing_catchup # this is because we are calling it manually rather than waiting for it # to be called automatically - self.hs.get_federation_sender()._catchup_after_startup_timer.cancel() + assert self.federation_sender._catchup_after_startup_timer is not None + self.federation_sender._catchup_after_startup_timer.cancel() self.get_success( - self.hs.get_federation_sender()._wake_destinations_needing_catchup(), by=5.0 + self.federation_sender._wake_destinations_needing_catchup(), by=5.0 ) # ASSERT (_wake_destinations_needing_catchup): @@ -475,6 +487,7 @@ def test_not_latest_event(self) -> None: ) ) + 
assert event_1.internal_metadata.stream_ordering is not None self.get_success( self.hs.get_datastores().main.set_destination_last_successful_stream_ordering( "host2", event_1.internal_metadata.stream_ordering @@ -488,3 +501,87 @@ def test_not_latest_event(self) -> None: self.assertEqual(len(sent_pdus), 1) self.assertEqual(sent_pdus[0].event_id, event_2.event_id) self.assertFalse(per_dest_queue._catching_up) + + def test_catch_up_is_not_blocked_by_remote_event_in_partial_state_room( + self, + ) -> None: + """Detects (part of?) https://github.com/matrix-org/synapse/issues/15220.""" + # ARRANGE: + # - a local user (u1) + # - a room which contains u1 and two remote users, @u2:host2 and @u3:other + # - events in that room such that + # - history visibility is restricted + # - u1 sent message events e1 and e2 + # - afterwards, u3 sent a remote event e3 + # - catchup to begin for host2; last successfully sent event was e1 + per_dest_queue, sent_pdus = self.make_fake_destination_queue() + + self.register_user("u1", "you the one") + u1_token = self.login("u1", "you the one") + room = self.helper.create_room_as("u1", tok=u1_token) + self.helper.send_state( + room_id=room, + event_type="m.room.history_visibility", + body={"history_visibility": "joined"}, + tok=u1_token, + ) + self.get_success( + event_injection.inject_member_event(self.hs, room, "@u2:host2", "join") + ) + self.get_success( + event_injection.inject_member_event(self.hs, room, "@u3:other", "join") + ) + + # create some events + event_id_1 = self.helper.send(room, "hello", tok=u1_token)["event_id"] + event_id_2 = self.helper.send(room, "world", tok=u1_token)["event_id"] + # pretend that u3 changes their displayname + event_id_3 = self.get_success( + event_injection.inject_member_event(self.hs, room, "@u3:other", "join") + ).event_id + + # destination_rooms should already be populated, but let us pretend that we already + # sent (successfully) up to and including event id 1 + event_1 = self.get_success(self.hs.get_datastores().main.get_event(event_id_1)) + assert event_1.internal_metadata.stream_ordering is not None + self.get_success( + self.hs.get_datastores().main.set_destination_last_successful_stream_ordering( + "host2", event_1.internal_metadata.stream_ordering + ) + ) + + # also fetch event 2 so we can compare its stream ordering to the sender's + # last_successful_stream_ordering later + event_2 = self.get_success(self.hs.get_datastores().main.get_event(event_id_2)) + + # Mock event 3 as having partial state + self.get_success( + event_injection.mark_event_as_partial_state(self.hs, event_id_3, room) + ) + + # Fail the test if we block on full state for event 3. 
+ async def mock_await_full_state(event_ids: Collection[str]) -> None: + if event_id_3 in event_ids: + raise AssertionError("Tried to await full state for event_id_3") + + # ACT + with mock.patch.object( + self.hs.get_storage_controllers().state._partial_state_events_tracker, + "await_full_state", + mock_await_full_state, + ): + self.get_success(per_dest_queue._catch_up_transmission_loop()) + + # ASSERT + # We should have: + # - not sent event 3: it's not ours, and the room is partial stated + # - fallen back to sending event 2: it's the most recent event in the room + # we tried to send to host2 + # - completed catch-up + self.assertEqual(len(sent_pdus), 1) + self.assertEqual(sent_pdus[0].event_id, event_id_2) + self.assertFalse(per_dest_queue._catching_up) + self.assertEqual( + per_dest_queue._last_successful_stream_ordering, + event_2.internal_metadata.stream_ordering, + ) diff --git a/tests/federation/test_federation_client.py b/tests/federation/test_federation_client.py index 86e1236501fd..91694e4fcada 100644 --- a/tests/federation/test_federation_client.py +++ b/tests/federation/test_federation_client.py @@ -178,7 +178,7 @@ def test_get_pdu_event_from_cache_is_pristine(self) -> None: RoomVersions.V9, ) ) - self.assertIsNotNone(pulled_pdu_info2) + assert pulled_pdu_info2 is not None remote_pdu2 = pulled_pdu_info2.pdu # Sanity check that we are working against the same event @@ -226,7 +226,7 @@ def _get_pdu_once(self) -> EventBase: RoomVersions.V9, ) ) - self.assertIsNotNone(pulled_pdu_info) + assert pulled_pdu_info is not None remote_pdu = pulled_pdu_info.pdu # check the right call got made to the agent diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py index ddeffe1ad53b..9e104fd96aeb 100644 --- a/tests/federation/test_federation_sender.py +++ b/tests/federation/test_federation_sender.py @@ -22,6 +22,7 @@ from synapse.api.constants import EduTypes, RoomEncryptionAlgorithms from synapse.federation.units import Transaction +from synapse.handlers.device import DeviceHandler from synapse.rest import admin from synapse.rest.client import login from synapse.server import HomeServer @@ -41,8 +42,9 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase): """ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: + self.federation_transport_client = Mock(spec=["send_transaction"]) hs = self.setup_test_homeserver( - federation_transport_client=Mock(spec=["send_transaction"]), + federation_transport_client=self.federation_transport_client, ) hs.get_storage_controllers().state.get_current_hosts_in_room = Mock( # type: ignore[assignment] @@ -61,9 +63,7 @@ def default_config(self) -> JsonDict: return config def test_send_receipts(self) -> None: - mock_send_transaction = ( - self.hs.get_federation_transport_client().send_transaction - ) + mock_send_transaction = self.federation_transport_client.send_transaction mock_send_transaction.return_value = make_awaitable({}) sender = self.hs.get_federation_sender() @@ -103,9 +103,7 @@ def test_send_receipts(self) -> None: ) def test_send_receipts_thread(self) -> None: - mock_send_transaction = ( - self.hs.get_federation_transport_client().send_transaction - ) + mock_send_transaction = self.federation_transport_client.send_transaction mock_send_transaction.return_value = make_awaitable({}) # Create receipts for: @@ -181,9 +179,7 @@ def test_send_receipts_thread(self) -> None: def test_send_receipts_with_backoff(self) -> None: """Send two receipts in quick succession; the second 
should be flushed, but only after 20ms""" - mock_send_transaction = ( - self.hs.get_federation_transport_client().send_transaction - ) + mock_send_transaction = self.federation_transport_client.send_transaction mock_send_transaction.return_value = make_awaitable({}) sender = self.hs.get_federation_sender() @@ -277,10 +273,11 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): ] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: + self.federation_transport_client = Mock( + spec=["send_transaction", "query_user_devices"] + ) return self.setup_test_homeserver( - federation_transport_client=Mock( - spec=["send_transaction", "query_user_devices"] - ), + federation_transport_client=self.federation_transport_client, ) def default_config(self) -> JsonDict: @@ -310,9 +307,13 @@ async def get_current_hosts_in_room(room_id: str) -> Set[str]: hs.get_datastores().main.get_current_hosts_in_room = get_current_hosts_in_room # type: ignore[assignment] + device_handler = hs.get_device_handler() + assert isinstance(device_handler, DeviceHandler) + self.device_handler = device_handler + # whenever send_transaction is called, record the edu data self.edus: List[JsonDict] = [] - self.hs.get_federation_transport_client().send_transaction.side_effect = ( + self.federation_transport_client.send_transaction.side_effect = ( self.record_transaction ) @@ -353,7 +354,7 @@ def test_dont_send_device_updates_for_remote_users(self) -> None: # Send the server a device list EDU for the other user, this will cause # it to try and resync the device lists. - self.hs.get_federation_transport_client().query_user_devices.return_value = ( + self.federation_transport_client.query_user_devices.return_value = ( make_awaitable( { "stream_id": "1", @@ -364,7 +365,7 @@ def test_dont_send_device_updates_for_remote_users(self) -> None: ) self.get_success( - self.hs.get_device_handler().device_list_updater.incoming_device_list_update( + self.device_handler.device_list_updater.incoming_device_list_update( "host2", { "user_id": "@user2:host2", @@ -507,9 +508,7 @@ def test_delete_devices(self) -> None: stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D3", stream_id) # delete them again - self.get_success( - self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"]) - ) + self.get_success(self.device_handler.delete_devices(u1, ["D1", "D2", "D3"])) # We queue up device list updates to be sent over federation, so we # advance to clear the queue. @@ -533,7 +532,7 @@ def test_unreachable_server(self) -> None: """If the destination server is unreachable, all the updates should get sent on recovery """ - mock_send_txn = self.hs.get_federation_transport_client().send_transaction + mock_send_txn = self.federation_transport_client.send_transaction mock_send_txn.side_effect = lambda t, cb: defer.fail(AssertionError("fail")) # create devices @@ -543,9 +542,7 @@ def test_unreachable_server(self) -> None: self.login("user", "pass", device_id="D3") # delete them again - self.get_success( - self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"]) - ) + self.get_success(self.device_handler.delete_devices(u1, ["D1", "D2", "D3"])) # We queue up device list updates to be sent over federation, so we # advance to clear the queue. @@ -580,7 +577,7 @@ def test_prune_outbound_device_pokes1(self) -> None: This case tests the behaviour when the server has never been reachable. 
""" - mock_send_txn = self.hs.get_federation_transport_client().send_transaction + mock_send_txn = self.federation_transport_client.send_transaction mock_send_txn.side_effect = lambda t, cb: defer.fail(AssertionError("fail")) # create devices @@ -590,9 +587,7 @@ def test_prune_outbound_device_pokes1(self) -> None: self.login("user", "pass", device_id="D3") # delete them again - self.get_success( - self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"]) - ) + self.get_success(self.device_handler.delete_devices(u1, ["D1", "D2", "D3"])) # We queue up device list updates to be sent over federation, so we # advance to clear the queue. @@ -640,7 +635,7 @@ def test_prune_outbound_device_pokes2(self) -> None: self.check_device_update_edu(self.edus.pop(0), u1, "D1", None) # now the server goes offline - mock_send_txn = self.hs.get_federation_transport_client().send_transaction + mock_send_txn = self.federation_transport_client.send_transaction mock_send_txn.side_effect = lambda t, cb: defer.fail(AssertionError("fail")) self.login("user", "pass", device_id="D2") @@ -651,9 +646,7 @@ def test_prune_outbound_device_pokes2(self) -> None: self.reactor.advance(1) # delete them again - self.get_success( - self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"]) - ) + self.get_success(self.device_handler.delete_devices(u1, ["D1", "D2", "D3"])) self.assertGreaterEqual(mock_send_txn.call_count, 3) diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index bba6469b559b..6c7738d81020 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -34,7 +34,6 @@ class FederationServerTests(unittest.FederatingHomeserverTestCase): - servlets = [ admin.register_servlets, room.register_servlets, diff --git a/tests/handlers/test_admin.py b/tests/handlers/test_admin.py index 6f300b8e1119..5569ccef8aef 100644 --- a/tests/handlers/test_admin.py +++ b/tests/handlers/test_admin.py @@ -23,6 +23,7 @@ from synapse.api.room_versions import RoomVersions from synapse.rest.client import knock, login, room from synapse.server import HomeServer +from synapse.types import UserID from synapse.util import Clock from tests import unittest @@ -296,3 +297,58 @@ def test_connections(self) -> None: self.assertEqual(args[0][0]["user_agent"], "user_agent") self.assertGreater(args[0][0]["last_seen"], 0) self.assertNotIn("access_token", args[0][0]) + + def test_account_data(self) -> None: + """Tests that user account data get exported.""" + # add account data + self.get_success( + self._store.add_account_data_for_user(self.user2, "m.global", {"a": 1}) + ) + self.get_success( + self._store.add_account_data_to_room( + self.user2, "test_room", "m.per_room", {"b": 2} + ) + ) + + writer = Mock() + + self.get_success(self.admin_handler.export_user_data(self.user2, writer)) + + # two calls, one call for user data and one call for room data + writer.write_account_data.assert_called() + + args = writer.write_account_data.call_args_list[0][0] + self.assertEqual(args[0], "global") + self.assertEqual(args[1]["m.global"]["a"], 1) + + args = writer.write_account_data.call_args_list[1][0] + self.assertEqual(args[0], "test_room") + self.assertEqual(args[1]["m.per_room"]["b"], 2) + + def test_media_ids(self) -> None: + """Tests that media's metadata get exported.""" + + self.get_success( + self._store.store_local_media( + media_id="media_1", + media_type="image/png", + time_now_ms=self.clock.time_msec(), + upload_name=None, + media_length=50, + 
                user_id=UserID.from_string(self.user2),
+            )
+        )
+
+        writer = Mock()
+
+        self.get_success(self.admin_handler.export_user_data(self.user2, writer))
+
+        writer.write_media_id.assert_called_once()
+
+        args = writer.write_media_id.call_args[0]
+        self.assertEqual(args[0], "media_1")
+        self.assertEqual(args[1]["media_id"], "media_1")
+        self.assertEqual(args[1]["media_length"], 50)
+        self.assertGreater(args[1]["created_ts"], 0)
+        self.assertIsNone(args[1]["upload_name"])
+        self.assertIsNone(args[1]["last_access_ts"])
diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py
index 421f78316f4b..db6972baaaa7 100644
--- a/tests/handlers/test_appservice.py
+++ b/tests/handlers/test_appservice.py
@@ -903,7 +903,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:

         # Mock out application services, and allow defining our own in tests
         self._services: List[ApplicationService] = []
-        self.hs.get_datastores().main.get_app_services = Mock(
+        self.hs.get_datastores().main.get_app_services = Mock(  # type: ignore[assignment]
             return_value=self._services
         )

diff --git a/tests/handlers/test_cas.py b/tests/handlers/test_cas.py
index 2733719d8270..63aad0d10c2f 100644
--- a/tests/handlers/test_cas.py
+++ b/tests/handlers/test_cas.py
@@ -61,7 +61,7 @@ def test_map_cas_user_to_user(self) -> None:

         # stub out the auth handler
         auth_handler = self.hs.get_auth_handler()
-        auth_handler.complete_sso_login = simple_async_mock()
+        auth_handler.complete_sso_login = simple_async_mock()  # type: ignore[assignment]

         cas_response = CasResponse("test_user", {})
         request = _mock_request()
@@ -89,7 +89,7 @@ def test_map_cas_user_to_existing_user(self) -> None:

         # stub out the auth handler
         auth_handler = self.hs.get_auth_handler()
-        auth_handler.complete_sso_login = simple_async_mock()
+        auth_handler.complete_sso_login = simple_async_mock()  # type: ignore[assignment]

         # Map a user via SSO.
         cas_response = CasResponse("test_user", {})
@@ -129,7 +129,7 @@ def test_map_cas_user_to_invalid_localpart(self) -> None:

         # stub out the auth handler
         auth_handler = self.hs.get_auth_handler()
-        auth_handler.complete_sso_login = simple_async_mock()
+        auth_handler.complete_sso_login = simple_async_mock()  # type: ignore[assignment]

         cas_response = CasResponse("föö", {})
         request = _mock_request()
@@ -160,7 +160,7 @@ def test_required_attributes(self) -> None:

         # stub out the auth handler
         auth_handler = self.hs.get_auth_handler()
-        auth_handler.complete_sso_login = simple_async_mock()
+        auth_handler.complete_sso_login = simple_async_mock()  # type: ignore[assignment]

         # The response doesn't have the proper userGroup or department.
cas_response = CasResponse("test_user", {}) diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 95698bc27585..6b4cba65d069 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -23,6 +23,7 @@ from synapse.api.constants import RoomEncryptionAlgorithms from synapse.api.errors import Codes, SynapseError +from synapse.handlers.device import DeviceHandler from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util import Clock @@ -187,37 +188,37 @@ def test_fallback_key(self) -> None: ) # we should now have an unused alg1 key - res = self.get_success( + fallback_res = self.get_success( self.store.get_e2e_unused_fallback_key_types(local_user, device_id) ) - self.assertEqual(res, ["alg1"]) + self.assertEqual(fallback_res, ["alg1"]) # claiming an OTK when no OTKs are available should return the fallback # key - res = self.get_success( + claim_res = self.get_success( self.handler.claim_one_time_keys( {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None ) ) self.assertEqual( - res, + claim_res, {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key}}}, ) # we shouldn't have any unused fallback keys again - res = self.get_success( + unused_res = self.get_success( self.store.get_e2e_unused_fallback_key_types(local_user, device_id) ) - self.assertEqual(res, []) + self.assertEqual(unused_res, []) # claiming an OTK again should return the same fallback key - res = self.get_success( + claim_res = self.get_success( self.handler.claim_one_time_keys( {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None ) ) self.assertEqual( - res, + claim_res, {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key}}}, ) @@ -231,10 +232,10 @@ def test_fallback_key(self) -> None: ) ) - res = self.get_success( + unused_res = self.get_success( self.store.get_e2e_unused_fallback_key_types(local_user, device_id) ) - self.assertEqual(res, []) + self.assertEqual(unused_res, []) # uploading a new fallback key should result in an unused fallback key self.get_success( @@ -245,10 +246,10 @@ def test_fallback_key(self) -> None: ) ) - res = self.get_success( + unused_res = self.get_success( self.store.get_e2e_unused_fallback_key_types(local_user, device_id) ) - self.assertEqual(res, ["alg1"]) + self.assertEqual(unused_res, ["alg1"]) # if the user uploads a one-time key, the next claim should fetch the # one-time key, and then go back to the fallback @@ -258,23 +259,23 @@ def test_fallback_key(self) -> None: ) ) - res = self.get_success( + claim_res = self.get_success( self.handler.claim_one_time_keys( {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None ) ) self.assertEqual( - res, + claim_res, {"failures": {}, "one_time_keys": {local_user: {device_id: otk}}}, ) - res = self.get_success( + claim_res = self.get_success( self.handler.claim_one_time_keys( {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None ) ) self.assertEqual( - res, + claim_res, {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key2}}}, ) @@ -287,13 +288,13 @@ def test_fallback_key(self) -> None: ) ) - res = self.get_success( + claim_res = self.get_success( self.handler.claim_one_time_keys( {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None ) ) self.assertEqual( - res, + claim_res, {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key3}}}, ) @@ -366,7 +367,7 @@ def test_reupload_signatures(self) -> None: 
self.get_success(self.handler.upload_signing_keys_for_user(local_user, keys1)) # upload two device keys, which will be signed later by the self-signing key - device_key_1 = { + device_key_1: JsonDict = { "user_id": local_user, "device_id": "abc", "algorithms": [ @@ -379,7 +380,7 @@ def test_reupload_signatures(self) -> None: }, "signatures": {local_user: {"ed25519:abc": "base64+signature"}}, } - device_key_2 = { + device_key_2: JsonDict = { "user_id": local_user, "device_id": "def", "algorithms": [ @@ -451,8 +452,10 @@ def test_self_signing_key_doesnt_show_up_as_device(self) -> None: } self.get_success(self.handler.upload_signing_keys_for_user(local_user, keys1)) + device_handler = self.hs.get_device_handler() + assert isinstance(device_handler, DeviceHandler) e = self.get_failure( - self.hs.get_device_handler().check_device_registered( + device_handler.check_device_registered( user_id=local_user, device_id="nqOvzeuGWT/sRx3h7+MHoInYj3Uk2LD/unI9kDYcHwk", initial_device_display_name="new display name", @@ -475,7 +478,7 @@ def test_upload_signatures(self) -> None: device_id = "xyz" # private key: OMkooTr76ega06xNvXIGPbgvvxAOzmQncN8VObS7aBA device_pubkey = "NnHhnqiMFQkq969szYkooLaBAXW244ZOxgukCvm2ZeY" - device_key = { + device_key: JsonDict = { "user_id": local_user, "device_id": device_id, "algorithms": [ @@ -497,7 +500,7 @@ def test_upload_signatures(self) -> None: # private key: 2lonYOM6xYKdEsO+6KrC766xBcHnYnim1x/4LFGF8B0 master_pubkey = "nqOvzeuGWT/sRx3h7+MHoInYj3Uk2LD/unI9kDYcHwk" - master_key = { + master_key: JsonDict = { "user_id": local_user, "usage": ["master"], "keys": {"ed25519:" + master_pubkey: master_pubkey}, @@ -540,7 +543,7 @@ def test_upload_signatures(self) -> None: # the first user other_user = "@otherboris:" + self.hs.hostname other_master_pubkey = "fHZ3NPiKxoLQm5OoZbKa99SYxprOjNs4TwJUKP+twCM" - other_master_key = { + other_master_key: JsonDict = { # private key: oyw2ZUx0O4GifbfFYM0nQvj9CL0b8B7cyN4FprtK8OI "user_id": other_user, "usage": ["master"], @@ -702,7 +705,7 @@ def test_query_devices_remote_no_sync(self) -> None: remote_master_key = "85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY" remote_self_signing_key = "QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ" - self.hs.get_federation_client().query_client_keys = mock.Mock( + self.hs.get_federation_client().query_client_keys = mock.Mock( # type: ignore[assignment] return_value=make_awaitable( { "device_keys": {remote_user_id: {}}, @@ -782,7 +785,7 @@ def test_query_devices_remote_sync(self) -> None: remote_master_key = "85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY" remote_self_signing_key = "QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ" - self.hs.get_federation_client().query_user_devices = mock.Mock( + self.hs.get_federation_client().query_user_devices = mock.Mock( # type: ignore[assignment] return_value=make_awaitable( { "user_id": remote_user_id, diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index 57675fa407e4..bf0862ed541c 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -371,14 +371,14 @@ def test_backfill_ignores_known_events(self) -> None: # We mock out the FederationClient.backfill method, to pretend that a remote # server has returned our fake event. 
federation_client_backfill_mock = Mock(return_value=make_awaitable([event])) - self.hs.get_federation_client().backfill = federation_client_backfill_mock + self.hs.get_federation_client().backfill = federation_client_backfill_mock # type: ignore[assignment] # We also mock the persist method with a side effect of itself. This allows us # to track when it has been called while preserving its function. persist_events_and_notify_mock = Mock( side_effect=self.hs.get_federation_event_handler().persist_events_and_notify ) - self.hs.get_federation_event_handler().persist_events_and_notify = ( + self.hs.get_federation_event_handler().persist_events_and_notify = ( # type: ignore[assignment] persist_events_and_notify_mock ) @@ -575,26 +575,6 @@ def test_failed_partial_join_is_clean(self) -> None: fed_client = fed_handler.federation_client room_id = "!room:example.com" - membership_event = make_event_from_dict( - { - "room_id": room_id, - "type": "m.room.member", - "sender": "@alice:test", - "state_key": "@alice:test", - "content": {"membership": "join"}, - }, - RoomVersions.V10, - ) - - mock_make_membership_event = Mock( - return_value=make_awaitable( - ( - "example.com", - membership_event, - RoomVersions.V10, - ) - ) - ) EVENT_CREATE = make_event_from_dict( { @@ -640,6 +620,26 @@ def test_failed_partial_join_is_clean(self) -> None: }, room_version=RoomVersions.V10, ) + membership_event = make_event_from_dict( + { + "room_id": room_id, + "type": "m.room.member", + "sender": "@alice:test", + "state_key": "@alice:test", + "content": {"membership": "join"}, + "prev_events": [EVENT_INVITATION_MEMBERSHIP.event_id], + }, + RoomVersions.V10, + ) + mock_make_membership_event = Mock( + return_value=make_awaitable( + ( + "example.com", + membership_event, + RoomVersions.V10, + ) + ) + ) mock_send_join = Mock( return_value=make_awaitable( SendJoinResult( @@ -712,12 +712,12 @@ async def sync_partial_state_room( fed_handler, "_sync_partial_state_room", mock_sync_partial_state_room ), patch.object(store, "is_partial_state_room", mock_is_partial_state_room): # Start the partial state sync. - fed_handler._start_partial_state_room_sync("hs1", ["hs2"], "room_id") + fed_handler._start_partial_state_room_sync("hs1", {"hs2"}, "room_id") self.assertEqual(mock_sync_partial_state_room.call_count, 1) # Try to start another partial state sync. # Nothing should happen. - fed_handler._start_partial_state_room_sync("hs3", ["hs2"], "room_id") + fed_handler._start_partial_state_room_sync("hs3", {"hs2"}, "room_id") self.assertEqual(mock_sync_partial_state_room.call_count, 1) # End the partial state sync @@ -729,7 +729,7 @@ async def sync_partial_state_room( # The next attempt to start the partial state sync should work. is_partial_state = True - fed_handler._start_partial_state_room_sync("hs3", ["hs2"], "room_id") + fed_handler._start_partial_state_room_sync("hs3", {"hs2"}, "room_id") self.assertEqual(mock_sync_partial_state_room.call_count, 2) def test_partial_state_room_sync_restart(self) -> None: @@ -764,7 +764,7 @@ async def sync_partial_state_room( fed_handler, "_sync_partial_state_room", mock_sync_partial_state_room ), patch.object(store, "is_partial_state_room", mock_is_partial_state_room): # Start the partial state sync. - fed_handler._start_partial_state_room_sync("hs1", ["hs2"], "room_id") + fed_handler._start_partial_state_room_sync("hs1", {"hs2"}, "room_id") self.assertEqual(mock_sync_partial_state_room.call_count, 1) # Fail the partial state sync. 
@@ -773,11 +773,11 @@ async def sync_partial_state_room( self.assertEqual(mock_sync_partial_state_room.call_count, 1) # Start the partial state sync again. - fed_handler._start_partial_state_room_sync("hs1", ["hs2"], "room_id") + fed_handler._start_partial_state_room_sync("hs1", {"hs2"}, "room_id") self.assertEqual(mock_sync_partial_state_room.call_count, 2) # Deduplicate another partial state sync. - fed_handler._start_partial_state_room_sync("hs3", ["hs2"], "room_id") + fed_handler._start_partial_state_room_sync("hs3", {"hs2"}, "room_id") self.assertEqual(mock_sync_partial_state_room.call_count, 2) # Fail the partial state sync. @@ -786,6 +786,6 @@ async def sync_partial_state_room( self.assertEqual(mock_sync_partial_state_room.call_count, 3) mock_sync_partial_state_room.assert_called_with( initial_destination="hs3", - other_destinations=["hs2"], + other_destinations={"hs2"}, room_id="room_id", ) diff --git a/tests/handlers/test_federation_event.py b/tests/handlers/test_federation_event.py index 70ea4d15d4d6..c067e5bfe3dc 100644 --- a/tests/handlers/test_federation_event.py +++ b/tests/handlers/test_federation_event.py @@ -29,6 +29,7 @@ from synapse.rest import admin from synapse.rest.client import login, room from synapse.server import HomeServer +from synapse.state import StateResolutionStore from synapse.state.v2 import _mainline_sort, _reverse_topological_power_sort from synapse.types import JsonDict from synapse.util import Clock @@ -161,6 +162,7 @@ def _test_process_pulled_event_with_missing_state( if prev_exists_as_outlier: prev_event.internal_metadata.outlier = True persistence = self.hs.get_storage_controllers().persistence + assert persistence is not None self.get_success( persistence.persist_event( prev_event, @@ -861,7 +863,7 @@ def test_process_pulled_event_with_rejected_missing_state(self) -> None: bert_member_event.event_id: bert_member_event, rejected_kick_event.event_id: rejected_kick_event, }, - state_res_store=main_store, + state_res_store=StateResolutionStore(main_store), ) ), [bert_member_event.event_id, rejected_kick_event.event_id], @@ -906,7 +908,7 @@ def test_process_pulled_event_with_rejected_missing_state(self) -> None: rejected_power_levels_event.event_id, ], event_map={}, - state_res_store=main_store, + state_res_store=StateResolutionStore(main_store), full_conflicted_set=set(), ) ), diff --git a/tests/handlers/test_message.py b/tests/handlers/test_message.py index c4727ab917fd..9691d66b48a0 100644 --- a/tests/handlers/test_message.py +++ b/tests/handlers/test_message.py @@ -18,7 +18,7 @@ from synapse.api.constants import EventTypes from synapse.events import EventBase -from synapse.events.snapshot import EventContext +from synapse.events.snapshot import EventContext, UnpersistedEventContextBase from synapse.rest import admin from synapse.rest.client import login, room from synapse.server import HomeServer @@ -41,20 +41,21 @@ class EventCreationTestCase(unittest.HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.handler = self.hs.get_event_creation_handler() - self._persist_event_storage_controller = ( - self.hs.get_storage_controllers().persistence - ) + persistence = self.hs.get_storage_controllers().persistence + assert persistence is not None + self._persist_event_storage_controller = persistence self.user_id = self.register_user("tester", "foobar") self.access_token = self.login("tester", "foobar") self.room_id = self.helper.create_room_as(self.user_id, tok=self.access_token) - self.info = 
self.get_success( + info = self.get_success( self.hs.get_datastores().main.get_user_by_access_token( self.access_token, ) ) - self.token_id = self.info.token_id + assert info is not None + self.token_id = info.token_id self.requester = create_requester(self.user_id, access_token_id=self.token_id) @@ -78,7 +79,9 @@ def _create_and_persist_member_event(self) -> Tuple[EventBase, EventContext]: return memberEvent, memberEventContext - def _create_duplicate_event(self, txn_id: str) -> Tuple[EventBase, EventContext]: + def _create_duplicate_event( + self, txn_id: str + ) -> Tuple[EventBase, UnpersistedEventContextBase]: """Create a new event with the given transaction ID. All events produced by this method will be considered duplicates. """ @@ -106,7 +109,8 @@ def test_duplicated_txn_id(self) -> None: txn_id = "something_suitably_random" - event1, context = self._create_duplicate_event(txn_id) + event1, unpersisted_context = self._create_duplicate_event(txn_id) + context = self.get_success(unpersisted_context.persist(event1)) ret_event1 = self.get_success( self.handler.handle_new_client_event( @@ -118,7 +122,8 @@ def test_duplicated_txn_id(self) -> None: self.assertEqual(event1.event_id, ret_event1.event_id) - event2, context = self._create_duplicate_event(txn_id) + event2, unpersisted_context = self._create_duplicate_event(txn_id) + context = self.get_success(unpersisted_context.persist(event2)) # We want to test that the deduplication at the persit event end works, # so we want to make sure we test with different events. @@ -139,7 +144,9 @@ def test_duplicated_txn_id(self) -> None: # Let's test that calling `persist_event` directly also does the right # thing. - event3, context = self._create_duplicate_event(txn_id) + event3, unpersisted_context = self._create_duplicate_event(txn_id) + context = self.get_success(unpersisted_context.persist(event3)) + self.assertNotEqual(event1.event_id, event3.event_id) ret_event3, event_pos3, _ = self.get_success( @@ -153,7 +160,8 @@ def test_duplicated_txn_id(self) -> None: # Let's test that calling `persist_events` directly also does the right # thing. 
-        event4, context = self._create_duplicate_event(txn_id)
+        event4, unpersisted_context = self._create_duplicate_event(txn_id)
+        context = self.get_success(unpersisted_context.persist(event4))

         self.assertNotEqual(event1.event_id, event3.event_id)

         events, _ = self.get_success(
@@ -173,8 +181,10 @@ def test_duplicated_txn_id_one_call(self) -> None:
         txn_id = "something_else_suitably_random"

         # Create two duplicate events to persist at the same time
-        event1, context1 = self._create_duplicate_event(txn_id)
-        event2, context2 = self._create_duplicate_event(txn_id)
+        event1, unpersisted_context1 = self._create_duplicate_event(txn_id)
+        context1 = self.get_success(unpersisted_context1.persist(event1))
+        event2, unpersisted_context2 = self._create_duplicate_event(txn_id)
+        context2 = self.get_success(unpersisted_context2.persist(event2))

         # Ensure their event IDs are different to start with
         self.assertNotEqual(event1.event_id, event2.event_id)

diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py
index adddbd002f50..951caaa6b3ba 100644
--- a/tests/handlers/test_oidc.py
+++ b/tests/handlers/test_oidc.py
@@ -150,7 +150,7 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
         hs = self.setup_test_homeserver()

         self.hs_patcher = self.fake_server.patch_homeserver(hs=hs)
-        self.hs_patcher.start()
+        self.hs_patcher.start()  # type: ignore[attr-defined]

         self.handler = hs.get_oidc_handler()
         self.provider = self.handler._providers["oidc"]
@@ -170,7 +170,7 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
         return hs

     def tearDown(self) -> None:
-        self.hs_patcher.stop()
+        self.hs_patcher.stop()  # type: ignore[attr-defined]
         return super().tearDown()

     def reset_mocks(self) -> None:
diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py
index 0916de64f548..aa91bc0a3df4 100644
--- a/tests/handlers/test_password_providers.py
+++ b/tests/handlers/test_password_providers.py
@@ -852,7 +852,7 @@ def _test_3pid_allowed(self, username: str, registration: bool) -> None:
             username: The username to use for the test.
             registration: Whether to test with registration URLs.
""" - self.hs.get_identity_handler().send_threepid_validation = Mock( + self.hs.get_identity_handler().send_threepid_validation = Mock( # type: ignore[assignment] return_value=make_awaitable(0), ) diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 234f8bf537ce..3ab34d838db7 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -62,7 +62,7 @@ async def check_registration_for_spam( request_info: Collection[Tuple[str, str]], auth_provider_id: Optional[str], ) -> RegistrationBehaviour: - pass + return RegistrationBehaviour.ALLOW class DenyAll(TestSpamChecker): @@ -111,7 +111,7 @@ async def check_registration_for_spam( username: Optional[str], request_info: Collection[Tuple[str, str]], ) -> RegistrationBehaviour: - pass + return RegistrationBehaviour.ALLOW class LegacyAllowAll(TestLegacyRegistrationSpamChecker): @@ -204,7 +204,7 @@ def test_mau_limits_when_disabled(self) -> None: @override_config({"limit_usage_by_mau": True}) def test_get_or_create_user_mau_not_blocked(self) -> None: - self.store.count_monthly_users = Mock( + self.store.count_monthly_users = Mock( # type: ignore[assignment] return_value=make_awaitable(self.hs.config.server.max_mau_value - 1) ) # Ensure does not throw exception @@ -305,7 +305,7 @@ def test_auto_create_auto_join_rooms_when_user_is_not_a_real_user(self) -> None: def test_auto_create_auto_join_rooms_when_user_is_the_first_real_user(self) -> None: room_alias_str = "#room:test" - self.store.count_real_users = Mock(return_value=make_awaitable(1)) + self.store.count_real_users = Mock(return_value=make_awaitable(1)) # type: ignore[assignment] self.store.is_real_user = Mock(return_value=make_awaitable(True)) user_id = self.get_success(self.handler.register_user(localpart="real")) rooms = self.get_success(self.store.get_rooms_for_user(user_id)) @@ -320,7 +320,7 @@ def test_auto_create_auto_join_rooms_when_user_is_the_first_real_user(self) -> N def test_auto_create_auto_join_rooms_when_user_is_not_the_first_real_user( self, ) -> None: - self.store.count_real_users = Mock(return_value=make_awaitable(2)) + self.store.count_real_users = Mock(return_value=make_awaitable(2)) # type: ignore[assignment] self.store.is_real_user = Mock(return_value=make_awaitable(True)) user_id = self.get_success(self.handler.register_user(localpart="real")) rooms = self.get_success(self.store.get_rooms_for_user(user_id)) @@ -347,6 +347,7 @@ def test_auto_create_auto_join_rooms_federated(self) -> None: # Ensure the room is properly not federated. room = self.get_success(self.store.get_room_with_stats(room_id["room_id"])) + assert room is not None self.assertFalse(room["federatable"]) self.assertFalse(room["public"]) self.assertEqual(room["join_rules"], "public") @@ -376,6 +377,7 @@ def test_auto_join_mxid_localpart(self) -> None: # Ensure the room is properly a public room. room = self.get_success(self.store.get_room_with_stats(room_id["room_id"])) + assert room is not None self.assertEqual(room["join_rules"], "public") # Both users should be in the room. @@ -414,6 +416,7 @@ def test_auto_create_auto_join_room_preset(self) -> None: # Ensure the room is properly a private room. room = self.get_success(self.store.get_room_with_stats(room_id["room_id"])) + assert room is not None self.assertFalse(room["public"]) self.assertEqual(room["join_rules"], "invite") self.assertEqual(room["guest_access"], "can_join") @@ -457,6 +460,7 @@ def test_auto_create_auto_join_room_preset_guest(self) -> None: # Ensure the room is properly a private room. 
room = self.get_success(self.store.get_room_with_stats(room_id["room_id"])) + assert room is not None self.assertFalse(room["public"]) self.assertEqual(room["join_rules"], "invite") self.assertEqual(room["guest_access"], "can_join") @@ -504,7 +508,7 @@ def test_auto_create_auto_join_room_preset_invalid_permissions(self) -> None: # Lower the permissions of the inviter. event_creation_handler = self.hs.get_event_creation_handler() requester = create_requester(inviter) - event, context = self.get_success( + event, unpersisted_context = self.get_success( event_creation_handler.create_event( requester, { @@ -516,6 +520,7 @@ def test_auto_create_auto_join_room_preset_invalid_permissions(self) -> None: }, ) ) + context = self.get_success(unpersisted_context.persist(event)) self.get_success( event_creation_handler.handle_new_client_event( requester, events_and_context=[(event, context)] diff --git a/tests/handlers/test_saml.py b/tests/handlers/test_saml.py index 9b1b8b9f1301..b5c772a7aedf 100644 --- a/tests/handlers/test_saml.py +++ b/tests/handlers/test_saml.py @@ -134,7 +134,7 @@ def test_map_saml_response_to_user(self) -> None: # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() + auth_handler.complete_sso_login = simple_async_mock() # type: ignore[assignment] # send a mocked-up SAML response to the callback saml_response = FakeAuthnResponse({"uid": "test_user", "username": "test_user"}) @@ -164,7 +164,7 @@ def test_map_saml_response_to_existing_user(self) -> None: # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() + auth_handler.complete_sso_login = simple_async_mock() # type: ignore[assignment] # Map a user via SSO. saml_response = FakeAuthnResponse( @@ -206,11 +206,11 @@ def test_map_saml_response_to_invalid_localpart(self) -> None: # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() + auth_handler.complete_sso_login = simple_async_mock() # type: ignore[assignment] # mock out the error renderer too sso_handler = self.hs.get_sso_handler() - sso_handler.render_error = Mock(return_value=None) + sso_handler.render_error = Mock(return_value=None) # type: ignore[assignment] saml_response = FakeAuthnResponse({"uid": "test", "username": "föö"}) request = _mock_request() @@ -227,9 +227,9 @@ def test_map_saml_response_to_user_retries(self) -> None: # stub out the auth handler and error renderer auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() + auth_handler.complete_sso_login = simple_async_mock() # type: ignore[assignment] sso_handler = self.hs.get_sso_handler() - sso_handler.render_error = Mock(return_value=None) + sso_handler.render_error = Mock(return_value=None) # type: ignore[assignment] # register a user to occupy the first-choice MXID store = self.hs.get_datastores().main @@ -312,7 +312,7 @@ def test_attribute_requirements(self) -> None: # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() + auth_handler.complete_sso_login = simple_async_mock() # type: ignore[assignment] # The response doesn't have the proper userGroup or department. 
saml_response = FakeAuthnResponse({"uid": "test_user", "username": "test_user"}) diff --git a/tests/handlers/test_sso.py b/tests/handlers/test_sso.py index 137deab138b5..d6f43a98fcda 100644 --- a/tests/handlers/test_sso.py +++ b/tests/handlers/test_sso.py @@ -113,7 +113,6 @@ async def mock_get_file( headers: Optional[RawHeaders] = None, is_allowed_content_type: Optional[Callable[[str], bool]] = None, ) -> Tuple[int, Dict[bytes, List[bytes]], str, int]: - fake_response = FakeResponse(code=404) if url == "http://my.server/me.png": fake_response = FakeResponse( diff --git a/tests/handlers/test_stats.py b/tests/handlers/test_stats.py index f1a50c5bcb26..d11ded6c5b63 100644 --- a/tests/handlers/test_stats.py +++ b/tests/handlers/test_stats.py @@ -31,7 +31,6 @@ class StatsRoomTests(unittest.HomeserverTestCase): - servlets = [ admin.register_servlets_for_client_rest_resource, room.register_servlets, diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 1fe9563c98f3..94518a7196b4 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -74,8 +74,8 @@ def make_homeserver( mock_keyring.verify_json_for_server.return_value = make_awaitable(True) # we mock out the federation client too - mock_federation_client = Mock(spec=["put_json"]) - mock_federation_client.put_json.return_value = make_awaitable((200, "OK")) + self.mock_federation_client = Mock(spec=["put_json"]) + self.mock_federation_client.put_json.return_value = make_awaitable((200, "OK")) # the tests assume that we are starting at unix time 1000 reactor.pump((1000,)) @@ -83,7 +83,7 @@ def make_homeserver( self.mock_hs_notifier = Mock() hs = self.setup_test_homeserver( notifier=self.mock_hs_notifier, - federation_http_client=mock_federation_client, + federation_http_client=self.mock_federation_client, keyring=mock_keyring, replication_streams={}, ) @@ -233,8 +233,7 @@ def test_started_typing_remote_send(self) -> None: ) ) - put_json = self.hs.get_federation_http_client().put_json - put_json.assert_called_once_with( + self.mock_federation_client.put_json.assert_called_once_with( "farm", path="/_matrix/federation/v1/send/1000000", data=_expect_edu_transaction( @@ -349,8 +348,7 @@ def test_stopped_typing(self) -> None: self.on_new_event.assert_has_calls([call("typing_key", 1, rooms=[ROOM_ID])]) - put_json = self.hs.get_federation_http_client().put_json - put_json.assert_called_once_with( + self.mock_federation_client.put_json.assert_called_once_with( "farm", path="/_matrix/federation/v1/send/1000000", data=_expect_edu_transaction( diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index 75fc5a17a47b..da4d24082648 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Tuple +from typing import Any, Tuple from unittest.mock import Mock, patch from urllib.parse import quote @@ -19,21 +19,28 @@ import synapse.rest.admin from synapse.api.constants import UserTypes +from synapse.api.errors import SynapseError from synapse.api.room_versions import RoomVersion, RoomVersions from synapse.appservice import ApplicationService from synapse.rest.client import login, register, room, user_directory from synapse.server import HomeServer from synapse.storage.roommember import ProfileInfo -from synapse.types import create_requester +from synapse.types import JsonDict, UserProfile, create_requester from synapse.util import Clock from tests import unittest from tests.storage.test_user_directory import GetUserDirectoryTables -from tests.test_utils import make_awaitable +from tests.test_utils import event_injection, make_awaitable from tests.test_utils.event_injection import inject_member_event from tests.unittest import override_config +# A spam checker which doesn't implement anything, so create a bare object. +class UselessSpamChecker: + def __init__(self, config: Any): + pass + + class UserDirectoryTestCase(unittest.HomeserverTestCase): """Tests the UserDirectoryHandler. @@ -186,6 +193,13 @@ def test_excludes_appservice_sender(self) -> None: self.helper.join(room, self.appservice.sender, tok=self.appservice.token) self._check_only_one_user_in_directory(user, room) + def test_search_term_with_colon_in_it_does_not_raise(self) -> None: + """ + Regression test: Test that search terms with colons in them are acceptable. + """ + u1 = self.register_user("user1", "pass") + self.get_success(self.handler.search_users(u1, "haha:paamayim-nekudotayim", 10)) + def test_user_not_in_users_table(self) -> None: """Unclear how it happens, but on matrix.org we've seen join events for users who aren't in the users table. Test that we don't fall over @@ -773,7 +787,7 @@ def test_spam_checker(self) -> None: s = self.get_success(self.handler.search_users(u1, "user2", 10)) self.assertEqual(len(s["results"]), 1) - async def allow_all(user_profile: ProfileInfo) -> bool: + async def allow_all(user_profile: UserProfile) -> bool: # Allow all users. return False @@ -787,7 +801,7 @@ async def allow_all(user_profile: ProfileInfo) -> bool: self.assertEqual(len(s["results"]), 1) # Configure a spam checker that filters all users. - async def block_all(user_profile: ProfileInfo) -> bool: + async def block_all(user_profile: UserProfile) -> bool: # All users are spammy. return True @@ -797,6 +811,13 @@ async def block_all(user_profile: ProfileInfo) -> bool: s = self.get_success(self.handler.search_users(u1, "user2", 10)) self.assertEqual(len(s["results"]), 0) + @override_config( + { + "spam_checker": { + "module": "tests.handlers.test_user_directory.UselessSpamChecker" + } + } + ) def test_legacy_spam_checker(self) -> None: """ A spam checker without the expected method should be ignored. @@ -825,11 +846,6 @@ def test_legacy_spam_checker(self) -> None: self.assertEqual(shares_private, {(u1, u2, room), (u2, u1, room)}) self.assertEqual(public_users, set()) - # Configure a spam checker. - spam_checker = self.hs.get_spam_checker() - # The spam checker doesn't need any methods, so create a bare object. - spam_checker.spam_checker = object() - # We get one search result when searching for user2 by user1. 
s = self.get_success(self.handler.search_users(u1, "user2", 10)) self.assertEqual(len(s["results"]), 1) @@ -949,13 +965,14 @@ def _add_user_to_room( }, ) - event, context = self.get_success( + event, unpersisted_context = self.get_success( self.event_creation_handler.create_new_client_event(builder) ) - self.get_success( - self.hs.get_storage_controllers().persistence.persist_event(event, context) - ) + context = self.get_success(unpersisted_context.persist(event)) + persistence = self.hs.get_storage_controllers().persistence + assert persistence is not None + self.get_success(persistence.persist_event(event, context)) def test_local_user_leaving_room_remains_in_user_directory(self) -> None: """We've chosen to simplify the user directory's implementation by @@ -1087,3 +1104,185 @@ def test_disabling_room_list(self) -> None: ) self.assertEqual(200, channel.code, channel.result) self.assertTrue(len(channel.json_body["results"]) == 0) + + +class UserDirectoryRemoteProfileTestCase(unittest.HomeserverTestCase): + servlets = [ + login.register_servlets, + synapse.rest.admin.register_servlets, + register.register_servlets, + room.register_servlets, + ] + + def default_config(self) -> JsonDict: + config = super().default_config() + # Re-enables updating the user directory, as that functionality is needed below. + config["update_user_directory_from_worker"] = None + return config + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + self.alice = self.register_user("alice", "alice123") + self.alice_tok = self.login("alice", "alice123") + self.user_dir_helper = GetUserDirectoryTables(self.store) + self.user_dir_handler = hs.get_user_directory_handler() + self.profile_handler = hs.get_profile_handler() + + # Cancel the startup call: in the steady-state case we can't rely on it anyway. + assert self.user_dir_handler._refresh_remote_profiles_call_later is not None + self.user_dir_handler._refresh_remote_profiles_call_later.cancel() + + def test_public_rooms_have_profiles_collected(self) -> None: + """ + In a public room, member state events are treated as reflecting the user's + real profile and they are accepted. + (The main motivation for accepting this is to prevent having to query + *every* single profile change over federation.) + """ + room_id = self.helper.create_room_as( + self.alice, is_public=True, tok=self.alice_tok + ) + self.get_success( + event_injection.inject_member_event( + self.hs, + room_id, + "@bruce:remote", + "join", + "@bruce:remote", + extra_content={ + "displayname": "Bruce!", + "avatar_url": "mxc://remote/123", + }, + ) + ) + # Sending this event makes the streams move forward after the injection... + self.helper.send(room_id, "Test", tok=self.alice_tok) + self.pump(0.1) + + profiles = self.get_success( + self.user_dir_helper.get_profiles_in_user_directory() + ) + self.assertEqual( + profiles.get("@bruce:remote"), + ProfileInfo(display_name="Bruce!", avatar_url="mxc://remote/123"), + ) + + def test_private_rooms_do_not_have_profiles_collected(self) -> None: + """ + In a private room, member state events are not pulled out and used to populate + the user directory. 
+ """ + room_id = self.helper.create_room_as( + self.alice, is_public=False, tok=self.alice_tok + ) + self.get_success( + event_injection.inject_member_event( + self.hs, + room_id, + "@bruce:remote", + "join", + "@bruce:remote", + extra_content={ + "displayname": "super-duper bruce", + "avatar_url": "mxc://remote/456", + }, + ) + ) + # Sending this event makes the streams move forward after the injection... + self.helper.send(room_id, "Test", tok=self.alice_tok) + self.pump(0.1) + + profiles = self.get_success( + self.user_dir_helper.get_profiles_in_user_directory() + ) + self.assertNotIn("@bruce:remote", profiles) + + def test_private_rooms_have_profiles_requested(self) -> None: + """ + When a name changes in a private room, the homeserver instead requests + the user's global profile over federation. + """ + + async def get_remote_profile( + user_id: str, ignore_backoff: bool = True + ) -> JsonDict: + if user_id == "@bruce:remote": + return { + "displayname": "Sir Bruce Bruceson", + "avatar_url": "mxc://remote/789", + } + else: + raise ValueError(f"unable to fetch {user_id}") + + with patch.object(self.profile_handler, "get_profile", get_remote_profile): + # Continue from the earlier test... + self.test_private_rooms_do_not_have_profiles_collected() + + # Advance by a minute + self.reactor.advance(61.0) + + profiles = self.get_success( + self.user_dir_helper.get_profiles_in_user_directory() + ) + self.assertEqual( + profiles.get("@bruce:remote"), + ProfileInfo( + display_name="Sir Bruce Bruceson", avatar_url="mxc://remote/789" + ), + ) + + def test_profile_requests_are_retried(self) -> None: + """ + When we fail to fetch the user's profile over federation, + we try again later. + """ + has_failed_once = False + + async def get_remote_profile( + user_id: str, ignore_backoff: bool = True + ) -> JsonDict: + nonlocal has_failed_once + if user_id == "@bruce:remote": + if not has_failed_once: + has_failed_once = True + raise SynapseError(502, "temporary network problem") + + return { + "displayname": "Sir Bruce Bruceson", + "avatar_url": "mxc://remote/789", + } + else: + raise ValueError(f"unable to fetch {user_id}") + + with patch.object(self.profile_handler, "get_profile", get_remote_profile): + # Continue from the earlier test... + self.test_private_rooms_do_not_have_profiles_collected() + + # Advance by a minute + self.reactor.advance(61.0) + + # The request has already failed once + self.assertTrue(has_failed_once) + + # The profile has yet to be updated. 
+ profiles = self.get_success( + self.user_dir_helper.get_profiles_in_user_directory() + ) + self.assertNotIn( + "@bruce:remote", + profiles, + ) + + # Advance by five minutes, after the backoff has finished + self.reactor.advance(301.0) + + # The profile should have been updated now + profiles = self.get_success( + self.user_dir_helper.get_profiles_in_user_directory() + ) + self.assertEqual( + profiles.get("@bruce:remote"), + ProfileInfo( + display_name="Sir Bruce Bruceson", avatar_url="mxc://remote/789" + ), + ) diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index acfdcd3bca25..eb7f53fee502 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -30,7 +30,7 @@ IOpenSSLClientConnectionCreator, IProtocolFactory, ) -from twisted.internet.protocol import Factory +from twisted.internet.protocol import Factory, Protocol from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol from twisted.web._newclient import ResponseNeverReceived from twisted.web.client import Agent @@ -63,7 +63,7 @@ get_test_ca_cert_file, ) from tests.server import FakeTransport, ThreadedMemoryReactorClock -from tests.utils import default_config +from tests.utils import checked_cast, default_config logger = logging.getLogger(__name__) @@ -146,8 +146,10 @@ def _make_connection( # # Normally this would be done by the TCP socket code in Twisted, but we are # stubbing that out here. - client_protocol = client_factory.buildProtocol(dummy_address) - assert isinstance(client_protocol, _WrappingProtocol) + # NB: we use a checked_cast here to workaround https://github.com/Shoobx/mypy-zope/issues/91) + client_protocol = checked_cast( + _WrappingProtocol, client_factory.buildProtocol(dummy_address) + ) client_protocol.makeConnection( FakeTransport(server_protocol, self.reactor, client_protocol) ) @@ -446,7 +448,6 @@ def _do_get_via_proxy( server_ssl_protocol = _wrap_server_factory_for_tls( _get_test_protocol_factory() ).buildProtocol(dummy_address) - assert isinstance(server_ssl_protocol, TLSMemoryBIOProtocol) # Tell the HTTP server to send outgoing traffic back via the proxy's transport. 
proxy_server_transport = proxy_server.transport @@ -465,7 +466,8 @@ def _do_get_via_proxy( else: assert isinstance(proxy_server_transport, FakeTransport) client_protocol = proxy_server_transport.other - c2s_transport = client_protocol.transport + assert isinstance(client_protocol, Protocol) + c2s_transport = checked_cast(FakeTransport, client_protocol.transport) c2s_transport.other = server_ssl_protocol self.reactor.advance(0) @@ -1529,7 +1531,7 @@ def _check_logcontext(context: LoggingContextOrSentinel) -> None: def _wrap_server_factory_for_tls( factory: IProtocolFactory, sanlist: Optional[List[bytes]] = None -) -> IProtocolFactory: +) -> TLSMemoryBIOFactory: """Wrap an existing Protocol Factory with a test TLSMemoryBIOFactory The resultant factory will create a TLS server which presents a certificate signed by our test CA, valid for the domains in `sanlist` diff --git a/tests/http/federation/test_srv_resolver.py b/tests/http/federation/test_srv_resolver.py index 7748f56ee6e2..6ab13357f942 100644 --- a/tests/http/federation/test_srv_resolver.py +++ b/tests/http/federation/test_srv_resolver.py @@ -46,7 +46,6 @@ def test_resolve(self) -> None: @defer.inlineCallbacks def do_lookup() -> Generator["Deferred[object]", object, List[Server]]: - with LoggingContext("one") as ctx: resolve_d = resolver.resolve_service(service_name) result: List[Server] diff --git a/tests/http/test_client.py b/tests/http/test_client.py index 9cfe1ad0de0a..57b6a84e2319 100644 --- a/tests/http/test_client.py +++ b/tests/http/test_client.py @@ -149,7 +149,7 @@ def setUp(self) -> None: self.allowed_domain, self.allowed_ip = b"allowed.test", b"5.1.1.1" # Configure the reactor's DNS resolver. - for (domain, ip) in ( + for domain, ip in ( (self.safe_domain, self.safe_ip), (self.unsafe_domain, self.unsafe_ip), (self.allowed_domain, self.allowed_ip), @@ -210,8 +210,8 @@ def test_agent(self) -> None: """Apply the blacklisting agent and ensure it properly blocks connections to particular IPs.""" agent = BlacklistingAgentWrapper( Agent(self.reactor), - ip_whitelist=self.ip_whitelist, ip_blacklist=self.ip_blacklist, + ip_whitelist=self.ip_whitelist, ) # The unsafe IPs should be rejected. diff --git a/tests/http/test_proxyagent.py b/tests/http/test_proxyagent.py index a817940730fd..cc175052ac78 100644 --- a/tests/http/test_proxyagent.py +++ b/tests/http/test_proxyagent.py @@ -28,7 +28,7 @@ _WrappingProtocol, ) from twisted.internet.interfaces import IProtocol, IProtocolFactory -from twisted.internet.protocol import Factory +from twisted.internet.protocol import Factory, Protocol from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol from twisted.web.http import HTTPChannel @@ -43,6 +43,7 @@ ) from tests.server import FakeTransport, ThreadedMemoryReactorClock from tests.unittest import TestCase +from tests.utils import checked_cast logger = logging.getLogger(__name__) @@ -620,7 +621,6 @@ def _do_https_request_via_proxy( server_ssl_protocol = _wrap_server_factory_for_tls( _get_test_protocol_factory() ).buildProtocol(dummy_address) - assert isinstance(server_ssl_protocol, TLSMemoryBIOProtocol) # Tell the HTTP server to send outgoing traffic back via the proxy's transport. 
proxy_server_transport = proxy_server.transport @@ -644,7 +644,8 @@ def _do_https_request_via_proxy( else: assert isinstance(proxy_server_transport, FakeTransport) client_protocol = proxy_server_transport.other - c2s_transport = client_protocol.transport + assert isinstance(client_protocol, Protocol) + c2s_transport = checked_cast(FakeTransport, client_protocol.transport) c2s_transport.other = server_ssl_protocol self.reactor.advance(0) @@ -757,12 +758,14 @@ def test_https_request_via_uppercase_proxy_with_blacklist(self) -> None: assert isinstance(proxy_server, HTTPChannel) # fish the transports back out so that we can do the old switcheroo - s2c_transport = proxy_server.transport - assert isinstance(s2c_transport, FakeTransport) - client_protocol = s2c_transport.other - assert isinstance(client_protocol, _WrappingProtocol) - c2s_transport = client_protocol.transport - assert isinstance(c2s_transport, FakeTransport) + # To help mypy out with the various Protocols and wrappers and mocks, we do + # some explicit casting. Without the casts, we hit the bug I reported at + # https://github.com/Shoobx/mypy-zope/issues/91 . + # We also double-checked these casts at runtime (test-time) because I found it + # quite confusing to deduce these types in the first place! + s2c_transport = checked_cast(FakeTransport, proxy_server.transport) + client_protocol = checked_cast(_WrappingProtocol, s2c_transport.other) + c2s_transport = checked_cast(FakeTransport, client_protocol.transport) # the FakeTransport is async, so we need to pump the reactor self.reactor.advance(0) @@ -822,9 +825,9 @@ def test_https_request_via_uppercase_proxy_with_blacklist(self) -> None: @patch.dict(os.environ, {"http_proxy": "proxy.com:8888"}) def test_proxy_with_no_scheme(self) -> None: http_proxy_agent = ProxyAgent(self.reactor, use_proxy=True) - assert isinstance(http_proxy_agent.http_proxy_endpoint, HostnameEndpoint) - self.assertEqual(http_proxy_agent.http_proxy_endpoint._hostStr, "proxy.com") - self.assertEqual(http_proxy_agent.http_proxy_endpoint._port, 8888) + proxy_ep = checked_cast(HostnameEndpoint, http_proxy_agent.http_proxy_endpoint) + self.assertEqual(proxy_ep._hostStr, "proxy.com") + self.assertEqual(proxy_ep._port, 8888) @patch.dict(os.environ, {"http_proxy": "socks://proxy.com:8888"}) def test_proxy_with_unsupported_scheme(self) -> None: @@ -834,25 +837,21 @@ def test_proxy_with_unsupported_scheme(self) -> None: @patch.dict(os.environ, {"http_proxy": "http://proxy.com:8888"}) def test_proxy_with_http_scheme(self) -> None: http_proxy_agent = ProxyAgent(self.reactor, use_proxy=True) - assert isinstance(http_proxy_agent.http_proxy_endpoint, HostnameEndpoint) - self.assertEqual(http_proxy_agent.http_proxy_endpoint._hostStr, "proxy.com") - self.assertEqual(http_proxy_agent.http_proxy_endpoint._port, 8888) + proxy_ep = checked_cast(HostnameEndpoint, http_proxy_agent.http_proxy_endpoint) + self.assertEqual(proxy_ep._hostStr, "proxy.com") + self.assertEqual(proxy_ep._port, 8888) @patch.dict(os.environ, {"http_proxy": "https://proxy.com:8888"}) def test_proxy_with_https_scheme(self) -> None: https_proxy_agent = ProxyAgent(self.reactor, use_proxy=True) - assert isinstance(https_proxy_agent.http_proxy_endpoint, _WrapperEndpoint) - self.assertEqual( - https_proxy_agent.http_proxy_endpoint._wrappedEndpoint._hostStr, "proxy.com" - ) - self.assertEqual( - https_proxy_agent.http_proxy_endpoint._wrappedEndpoint._port, 8888 - ) + proxy_ep = checked_cast(_WrapperEndpoint, https_proxy_agent.http_proxy_endpoint) + 
self.assertEqual(proxy_ep._wrappedEndpoint._hostStr, "proxy.com") + self.assertEqual(proxy_ep._wrappedEndpoint._port, 8888) def _wrap_server_factory_for_tls( factory: IProtocolFactory, sanlist: Optional[List[bytes]] = None -) -> IProtocolFactory: +) -> TLSMemoryBIOFactory: """Wrap an existing Protocol Factory with a test TLSMemoryBIOFactory The resultant factory will create a TLS server which presents a certificate diff --git a/tests/logging/test_remote_handler.py b/tests/logging/test_remote_handler.py index c08954d887cb..5191e31a8ae8 100644 --- a/tests/logging/test_remote_handler.py +++ b/tests/logging/test_remote_handler.py @@ -21,6 +21,7 @@ from tests.logging import LoggerCleanupMixin from tests.server import FakeTransport, get_clock from tests.unittest import TestCase +from tests.utils import checked_cast def connect_logging_client( @@ -56,8 +57,8 @@ def test_log_output(self) -> None: client, server = connect_logging_client(self.reactor, 0) # Trigger data being sent - assert isinstance(client.transport, FakeTransport) - client.transport.flush() + client_transport = checked_cast(FakeTransport, client.transport) + client_transport.flush() # One log message, with a single trailing newline logs = server.data.decode("utf8").splitlines() @@ -89,8 +90,8 @@ def test_log_backpressure_debug(self) -> None: # Allow the reconnection client, server = connect_logging_client(self.reactor, 0) - assert isinstance(client.transport, FakeTransport) - client.transport.flush() + client_transport = checked_cast(FakeTransport, client.transport) + client_transport.flush() # Only the 7 infos made it through, the debugs were elided logs = server.data.splitlines() @@ -123,8 +124,8 @@ def test_log_backpressure_info(self) -> None: # Allow the reconnection client, server = connect_logging_client(self.reactor, 0) - assert isinstance(client.transport, FakeTransport) - client.transport.flush() + client_transport = checked_cast(FakeTransport, client.transport) + client_transport.flush() # The 10 warnings made it through, the debugs and infos were elided logs = server.data.splitlines() @@ -148,8 +149,8 @@ def test_log_backpressure_cut_middle(self) -> None: # Allow the reconnection client, server = connect_logging_client(self.reactor, 0) - assert isinstance(client.transport, FakeTransport) - client.transport.flush() + client_transport = checked_cast(FakeTransport, client.transport) + client_transport.flush() # The first five and last five warnings made it through, the debugs and # infos were elided diff --git a/tests/rest/media/v1/__init__.py b/tests/media/__init__.py similarity index 91% rename from tests/rest/media/v1/__init__.py rename to tests/media/__init__.py index b1ee10cfcc44..68910cbf5b96 100644 --- a/tests/rest/media/v1/__init__.py +++ b/tests/media/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2018 New Vector Ltd +# Copyright 2023 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/rest/media/v1/test_base.py b/tests/media/test_base.py similarity index 95% rename from tests/rest/media/v1/test_base.py rename to tests/media/test_base.py index c73179151adb..66498c744d6d 100644 --- a/tests/rest/media/v1/test_base.py +++ b/tests/media/test_base.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
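Note: the hunks above lean heavily on `checked_cast` from `tests/utils`, whose definition is not part of this diff. A minimal sketch, assuming it is simply a runtime-checked variant of `typing.cast`:

    from typing import Type, TypeVar

    T = TypeVar("T")

    def checked_cast(type: Type[T], x: object) -> T:
        # Narrow the static type for mypy, but also verify the claim at
        # runtime; this sidesteps the mypy-zope narrowing bug referenced
        # in the comments above.
        assert isinstance(x, type)
        return x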
-from synapse.rest.media.v1._base import get_filename_from_headers +from synapse.media._base import get_filename_from_headers from tests import unittest diff --git a/tests/rest/media/v1/test_filepath.py b/tests/media/test_filepath.py similarity index 99% rename from tests/rest/media/v1/test_filepath.py rename to tests/media/test_filepath.py index 43e6f0f70aad..95e3b83d5acb 100644 --- a/tests/rest/media/v1/test_filepath.py +++ b/tests/media/test_filepath.py @@ -15,7 +15,7 @@ import os from typing import Iterable -from synapse.rest.media.v1.filepath import MediaFilePaths, _wrap_with_jail_check +from synapse.media.filepath import MediaFilePaths, _wrap_with_jail_check from tests import unittest diff --git a/tests/rest/media/v1/test_html_preview.py b/tests/media/test_html_preview.py similarity index 99% rename from tests/rest/media/v1/test_html_preview.py rename to tests/media/test_html_preview.py index 1062081a060b..e7da75db3ee0 100644 --- a/tests/rest/media/v1/test_html_preview.py +++ b/tests/media/test_html_preview.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.rest.media.v1.preview_html import ( +from synapse.media.preview_html import ( _get_html_media_encodings, decode_body, parse_html_to_open_graph, diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/media/test_media_storage.py similarity index 93% rename from tests/rest/media/v1/test_media_storage.py rename to tests/media/test_media_storage.py index d18fc13c21d6..870047d0f250 100644 --- a/tests/rest/media/v1/test_media_storage.py +++ b/tests/media/test_media_storage.py @@ -16,7 +16,7 @@ import tempfile from binascii import unhexlify from io import BytesIO -from typing import Any, BinaryIO, Dict, List, Optional, Union +from typing import Any, BinaryIO, ClassVar, Dict, List, Optional, Tuple, Union from unittest.mock import Mock from urllib import parse @@ -32,16 +32,17 @@ from synapse.api.errors import Codes from synapse.events import EventBase from synapse.events.spamcheck import load_legacy_spam_checkers +from synapse.http.types import QueryParams from synapse.logging.context import make_deferred_yieldable +from synapse.media._base import FileInfo +from synapse.media.filepath import MediaFilePaths +from synapse.media.media_storage import MediaStorage, ReadableFileWrapper +from synapse.media.storage_provider import FileStorageProviderBackend from synapse.module_api import ModuleApi from synapse.rest import admin from synapse.rest.client import login -from synapse.rest.media.v1._base import FileInfo -from synapse.rest.media.v1.filepath import MediaFilePaths -from synapse.rest.media.v1.media_storage import MediaStorage, ReadableFileWrapper -from synapse.rest.media.v1.storage_provider import FileStorageProviderBackend from synapse.server import HomeServer -from synapse.types import RoomAlias +from synapse.types import JsonDict, RoomAlias from synapse.util import Clock from tests import unittest @@ -51,7 +52,6 @@ class MediaStorageTests(unittest.HomeserverTestCase): - needs_threadpool = True def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: @@ -201,36 +201,45 @@ class _TestImage: ], ) class MediaRepoTests(unittest.HomeserverTestCase): - + test_image: ClassVar[_TestImage] hijack_auth = True user_id = "@test:user" def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - - self.fetches = [] + self.fetches: List[ + Tuple[ + "Deferred[Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]]]", + 
str, + str, + Optional[QueryParams], + ] + ] = [] def get_file( destination: str, path: str, output_stream: BinaryIO, - args: Optional[Dict[str, Union[str, List[str]]]] = None, + args: Optional[QueryParams] = None, + retry_on_dns_fail: bool = True, max_size: Optional[int] = None, - ) -> Deferred: - """ - Returns tuple[int,dict,str,int] of file length, response headers, - absolute URI, and response code. - """ + ignore_backoff: bool = False, + ) -> "Deferred[Tuple[int, Dict[bytes, List[bytes]]]]": + """A mock for MatrixFederationHttpClient.get_file.""" - def write_to(r): + def write_to( + r: Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]] + ) -> Tuple[int, Dict[bytes, List[bytes]]]: data, response = r output_stream.write(data) return response - d = Deferred() - d.addCallback(write_to) + d: Deferred[Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]]] = Deferred() self.fetches.append((d, destination, path, args)) - return make_deferred_yieldable(d) + # Note that this callback changes the value held by d. + d_after_callback = d.addCallback(write_to) + return make_deferred_yieldable(d_after_callback) + # Mock out the homeserver's MatrixFederationHttpClient client = Mock() client.get_file = get_file @@ -244,7 +253,7 @@ def write_to(r): config["max_image_pixels"] = 2000000 provider_config = { - "module": "synapse.rest.media.v1.storage_provider.FileStorageProviderBackend", + "module": "synapse.media.storage_provider.FileStorageProviderBackend", "store_local": True, "store_synchronous": False, "store_remote": True, @@ -257,7 +266,6 @@ def write_to(r): return hs def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - media_resource = hs.get_media_repository_resource() self.download_resource = media_resource.children[b"download"] self.thumbnail_resource = media_resource.children[b"thumbnail"] @@ -461,6 +469,7 @@ def test_thumbnail_repeated_thumbnail(self) -> None: # Synapse should regenerate missing thumbnails. 
origin, media_id = self.media_id.split("/") info = self.get_success(self.store.get_cached_remote_media(origin, media_id)) + assert info is not None file_id = info["filesystem_id"] thumbnail_dir = self.media_repo.filepaths.remote_media_thumbnail_dir( @@ -581,7 +590,7 @@ def test_same_quality(self, method: str, desired_size: int) -> None: "thumbnail_method": method, "thumbnail_type": self.test_image.content_type, "thumbnail_length": 256, - "filesystem_id": f"thumbnail1{self.test_image.extension}", + "filesystem_id": f"thumbnail1{self.test_image.extension.decode()}", }, { "thumbnail_width": 32, @@ -589,10 +598,10 @@ def test_same_quality(self, method: str, desired_size: int) -> None: "thumbnail_method": method, "thumbnail_type": self.test_image.content_type, "thumbnail_length": 256, - "filesystem_id": f"thumbnail2{self.test_image.extension}", + "filesystem_id": f"thumbnail2{self.test_image.extension.decode()}", }, ], - file_id=f"image{self.test_image.extension}", + file_id=f"image{self.test_image.extension.decode()}", url_cache=None, server_name=None, ) @@ -637,6 +646,7 @@ def __init__(self, config: Dict[str, Any], api: ModuleApi) -> None: self.config = config self.api = api + @staticmethod def parse_config(config: Dict[str, Any]) -> Dict[str, Any]: return config @@ -748,7 +758,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: async def check_media_file_for_spam( self, file_wrapper: ReadableFileWrapper, file_info: FileInfo - ) -> Union[Codes, Literal["NOT_SPAM"]]: + ) -> Union[Codes, Literal["NOT_SPAM"], Tuple[Codes, JsonDict]]: buf = BytesIO() await file_wrapper.write_chunks_to(buf.write) diff --git a/tests/rest/media/v1/test_oembed.py b/tests/media/test_oembed.py similarity index 98% rename from tests/rest/media/v1/test_oembed.py rename to tests/media/test_oembed.py index 3f7f1dbab9b7..c8bf8421daf1 100644 --- a/tests/rest/media/v1/test_oembed.py +++ b/tests/media/test_oembed.py @@ -18,7 +18,7 @@ from twisted.test.proto_helpers import MemoryReactor -from synapse.rest.media.v1.oembed import OEmbedProvider, OEmbedResult +from synapse.media.oembed import OEmbedProvider, OEmbedResult from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util import Clock diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index 8f88c0117d78..3a1929691e9c 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -11,9 +11,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
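One subtlety in the `get_file` mock above: `Deferred.addCallback` returns the same Deferred object, but once the callback runs, the value the Deferred holds is the callback's return value, which is why the mock hands back `d_after_callback` rather than `d`. A small illustration of that behaviour (not taken from the patch):

    from twisted.internet.defer import Deferred

    d: "Deferred[int]" = Deferred()
    d2 = d.addCallback(lambda n: str(n))  # typed as a Deferred yielding str
    assert d2 is d                        # same object at runtime
    d.callback(41)
    # After firing, the result held by d/d2 is the string "41", not 41.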
+from typing import Any, Dict from unittest.mock import Mock from twisted.internet import defer +from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import EduTypes, EventTypes from synapse.api.errors import NotFoundError @@ -21,9 +23,12 @@ from synapse.federation.units import Transaction from synapse.handlers.presence import UserPresenceState from synapse.handlers.push_rules import InvalidRuleException +from synapse.module_api import ModuleApi from synapse.rest import admin from synapse.rest.client import login, notifications, presence, profile, room -from synapse.types import create_requester +from synapse.server import HomeServer +from synapse.types import JsonDict, create_requester +from synapse.util import Clock from tests.events.test_presence_router import send_presence_update, sync_presence from tests.replication._base import BaseMultiWorkerStreamTestCase @@ -32,7 +37,19 @@ from tests.unittest import HomeserverTestCase, override_config -class ModuleApiTestCase(HomeserverTestCase): +class BaseModuleApiTestCase(HomeserverTestCase): + """Common properties of the two test case classes.""" + + module_api: ModuleApi + + # These are all written by _test_sending_local_online_presence_to_local_user. + presence_receiver_id: str + presence_receiver_tok: str + presence_sender_id: str + presence_sender_tok: str + + +class ModuleApiTestCase(BaseModuleApiTestCase): servlets = [ admin.register_servlets, login.register_servlets, @@ -42,23 +59,23 @@ class ModuleApiTestCase(HomeserverTestCase): notifications.register_servlets, ] - def prepare(self, reactor, clock, homeserver): - self.store = homeserver.get_datastores().main - self.module_api = homeserver.get_module_api() - self.event_creation_handler = homeserver.get_event_creation_handler() - self.sync_handler = homeserver.get_sync_handler() - self.auth_handler = homeserver.get_auth_handler() + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + self.module_api = hs.get_module_api() + self.event_creation_handler = hs.get_event_creation_handler() + self.sync_handler = hs.get_sync_handler() + self.auth_handler = hs.get_auth_handler() - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: # Mock out the calls over federation. 
- fed_transport_client = Mock(spec=["send_transaction"]) - fed_transport_client.send_transaction = simple_async_mock({}) + self.fed_transport_client = Mock(spec=["send_transaction"]) + self.fed_transport_client.send_transaction = simple_async_mock({}) return self.setup_test_homeserver( - federation_transport_client=fed_transport_client, + federation_transport_client=self.fed_transport_client, ) - def test_can_register_user(self): + def test_can_register_user(self) -> None: """Tests that an external module can register a user""" # Register a new user user_id, access_token = self.get_success( @@ -88,16 +105,17 @@ def test_can_register_user(self): displayname = self.get_success(self.store.get_profile_displayname("bob")) self.assertEqual(displayname, "Bobberino") - def test_can_register_admin_user(self): + def test_can_register_admin_user(self) -> None: user_id = self.register_user( "bob_module_admin", "1234", displayname="Bobberino Admin", admin=True ) found_user = self.get_success(self.module_api.get_userinfo_by_id(user_id)) + assert found_user is not None self.assertEqual(found_user.user_id.to_string(), user_id) self.assertIdentical(found_user.is_admin, True) - def test_can_set_admin(self): + def test_can_set_admin(self) -> None: user_id = self.register_user( "alice_wants_admin", "1234", @@ -107,16 +125,17 @@ def test_can_set_admin(self): self.get_success(self.module_api.set_user_admin(user_id, True)) found_user = self.get_success(self.module_api.get_userinfo_by_id(user_id)) + assert found_user is not None self.assertEqual(found_user.user_id.to_string(), user_id) self.assertIdentical(found_user.is_admin, True) - def test_can_set_displayname(self): + def test_can_set_displayname(self) -> None: localpart = "alice_wants_a_new_displayname" user_id = self.register_user( localpart, "1234", displayname="Alice", admin=False ) found_userinfo = self.get_success(self.module_api.get_userinfo_by_id(user_id)) - + assert found_userinfo is not None self.get_success( self.module_api.set_displayname( found_userinfo.user_id, "Bob", deactivation=False @@ -128,17 +147,18 @@ def test_can_set_displayname(self): self.assertEqual(found_profile.display_name, "Bob") - def test_get_userinfo_by_id(self): + def test_get_userinfo_by_id(self) -> None: user_id = self.register_user("alice", "1234") found_user = self.get_success(self.module_api.get_userinfo_by_id(user_id)) + assert found_user is not None self.assertEqual(found_user.user_id.to_string(), user_id) self.assertIdentical(found_user.is_admin, False) - def test_get_userinfo_by_id__no_user_found(self): + def test_get_userinfo_by_id__no_user_found(self) -> None: found_user = self.get_success(self.module_api.get_userinfo_by_id("@alice:test")) self.assertIsNone(found_user) - def test_get_user_ip_and_agents(self): + def test_get_user_ip_and_agents(self) -> None: user_id = self.register_user("test_get_user_ip_and_agents_user", "1234") # Initially, we should have no ip/agent for our user. @@ -185,7 +205,7 @@ def test_get_user_ip_and_agents(self): # we should only find the second ip, agent. 
info = self.get_success( self.module_api.get_user_ip_and_agents( - user_id, (last_seen_1 + last_seen_2) / 2 + user_id, (last_seen_1 + last_seen_2) // 2 ) ) self.assertEqual(len(info), 1) @@ -200,7 +220,7 @@ def test_get_user_ip_and_agents(self): ) self.assertEqual(info, []) - def test_get_user_ip_and_agents__no_user_found(self): + def test_get_user_ip_and_agents__no_user_found(self) -> None: info = self.get_success( self.module_api.get_user_ip_and_agents( "@test_get_user_ip_and_agents_user_nonexistent:example.com" @@ -208,10 +228,10 @@ def test_get_user_ip_and_agents__no_user_found(self): ) self.assertEqual(info, []) - def test_sending_events_into_room(self): + def test_sending_events_into_room(self) -> None: """Tests that a module can send events into a room""" # Mock out create_and_send_nonmember_event to check whether events are being sent - self.event_creation_handler.create_and_send_nonmember_event = Mock( + self.event_creation_handler.create_and_send_nonmember_event = Mock( # type: ignore[assignment] spec=[], side_effect=self.event_creation_handler.create_and_send_nonmember_event, ) @@ -222,7 +242,7 @@ def test_sending_events_into_room(self): room_id = self.helper.create_room_as(user_id, tok=tok) # Create and send a non-state event - content = {"body": "I am a puppet", "msgtype": "m.text"} + content: JsonDict = {"body": "I am a puppet", "msgtype": "m.text"} event_dict = { "room_id": room_id, "type": "m.room.message", @@ -265,7 +285,7 @@ def test_sending_events_into_room(self): "sender": user_id, "state_key": "", } - event: EventBase = self.get_success( + event = self.get_success( self.module_api.create_and_send_event_into_room(event_dict) ) self.assertEqual(event.sender, user_id) @@ -303,7 +323,7 @@ def test_sending_events_into_room(self): self.module_api.create_and_send_event_into_room(event_dict), Exception ) - def test_public_rooms(self): + def test_public_rooms(self) -> None: """Tests that a room can be added and removed from the public rooms list, as well as have its public rooms directory state queried. """ @@ -350,13 +370,13 @@ def test_public_rooms(self): ) self.assertFalse(is_in_public_rooms) - def test_send_local_online_presence_to(self): + def test_send_local_online_presence_to(self) -> None: # Test sending local online presence to users from the main process _test_sending_local_online_presence_to_local_user(self, test_with_workers=False) # Enable federation sending on the main process. 
@override_config({"federation_sender_instances": None}) - def test_send_local_online_presence_to_federation(self): + def test_send_local_online_presence_to_federation(self) -> None: """Tests that send_local_presence_to_users sends local online presence to remote users.""" # Create a user who will send presence updates self.presence_sender_id = self.register_user("presence_sender1", "monkey") @@ -397,7 +417,7 @@ def test_send_local_online_presence_to_federation(self): # # Thus we reset the mock, and try sending online local user # presence again - self.hs.get_federation_transport_client().send_transaction.reset_mock() + self.fed_transport_client.send_transaction.reset_mock() # Broadcast local user online presence self.get_success( @@ -409,9 +429,7 @@ def test_send_local_online_presence_to_federation(self): # Check that a presence update was sent as part of a federation transaction found_update = False - calls = ( - self.hs.get_federation_transport_client().send_transaction.call_args_list - ) + calls = self.fed_transport_client.send_transaction.call_args_list for call in calls: call_args = call[0] federation_transaction: Transaction = call_args[0] @@ -431,7 +449,7 @@ def test_send_local_online_presence_to_federation(self): self.assertTrue(found_update) - def test_update_membership(self): + def test_update_membership(self) -> None: """Tests that the module API can update the membership of a user in a room.""" peter = self.register_user("peter", "hackme") lesley = self.register_user("lesley", "hackme") @@ -554,14 +572,14 @@ def test_update_membership(self): self.assertEqual(res["displayname"], "simone") self.assertIsNone(res["avatar_url"]) - def test_update_room_membership_remote_join(self): + def test_update_room_membership_remote_join(self) -> None: """Test that the module API can join a remote room.""" # Necessary to fake a remote join. fake_stream_id = 1 mocked_remote_join = simple_async_mock( return_value=("fake-event-id", fake_stream_id) ) - self.hs.get_room_member_handler()._remote_join = mocked_remote_join + self.hs.get_room_member_handler()._remote_join = mocked_remote_join # type: ignore[assignment] fake_remote_host = f"{self.module_api.server_name}-remote" # Given that the join is to be faked, we expect the relevant join event not to @@ -582,7 +600,7 @@ def test_update_room_membership_remote_join(self): # Check that a remote join was attempted. 
self.assertEqual(mocked_remote_join.call_count, 1) - def test_get_room_state(self): + def test_get_room_state(self) -> None: """Tests that a module can retrieve the state of a room through the module API.""" user_id = self.register_user("peter", "hackme") tok = self.login("peter", "hackme") @@ -677,7 +695,7 @@ def test_check_push_rules_actions(self) -> None: self.module_api.check_push_rule_actions(["foo"]) with self.assertRaises(InvalidRuleException): - self.module_api.check_push_rule_actions({"foo": "bar"}) + self.module_api.check_push_rule_actions([{"foo": "bar"}]) self.module_api.check_push_rule_actions(["notify"]) @@ -756,7 +774,7 @@ def test_create_room(self) -> None: self.assertIsNone(room_alias) -class ModuleApiWorkerTestCase(BaseMultiWorkerStreamTestCase): +class ModuleApiWorkerTestCase(BaseModuleApiTestCase, BaseMultiWorkerStreamTestCase): """For testing ModuleApi functionality in a multi-worker setup""" servlets = [ @@ -766,7 +784,7 @@ class ModuleApiWorkerTestCase(BaseMultiWorkerStreamTestCase): presence.register_servlets, ] - def default_config(self): + def default_config(self) -> Dict[str, Any]: conf = super().default_config() conf["stream_writers"] = {"presence": ["presence_writer"]} conf["instance_map"] = { @@ -774,18 +792,18 @@ def default_config(self): } return conf - def prepare(self, reactor, clock, homeserver): - self.module_api = homeserver.get_module_api() - self.sync_handler = homeserver.get_sync_handler() + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.module_api = hs.get_module_api() + self.sync_handler = hs.get_sync_handler() - def test_send_local_online_presence_to_workers(self): + def test_send_local_online_presence_to_workers(self) -> None: # Test sending local online presence to users from a worker process _test_sending_local_online_presence_to_local_user(self, test_with_workers=True) def _test_sending_local_online_presence_to_local_user( - test_case: HomeserverTestCase, test_with_workers: bool = False -): + test_case: BaseModuleApiTestCase, test_with_workers: bool = False +) -> None: """Tests that send_local_presence_to_users sends local online presence to local users. This simultaneously tests two different usecases: @@ -852,6 +870,7 @@ def _test_sending_local_online_presence_to_local_user( # Replicate the current sync presence token from the main process to the worker process. # We need to do this so that the worker process knows the current presence stream ID to # insert into the database when we call ModuleApi.send_local_online_presence_to. + assert isinstance(test_case, BaseMultiWorkerStreamTestCase) test_case.replicate() # Syncing again should result in no presence updates @@ -868,6 +887,7 @@ def _test_sending_local_online_presence_to_local_user( # Determine on which process (main or worker) to call ModuleApi.send_local_online_presence_to on if test_with_workers: + assert isinstance(test_case, BaseMultiWorkerStreamTestCase) module_api_to_use = worker_hs.get_module_api() else: module_api_to_use = test_case.module_api @@ -875,12 +895,11 @@ def _test_sending_local_online_presence_to_local_user( # Trigger sending local online presence. We expect this information # to be saved to the database where all processes can access it. # Note that we're syncing via the master. 
- d = module_api_to_use.send_local_online_presence_to( - [ - test_case.presence_receiver_id, - ] + d = defer.ensureDeferred( + module_api_to_use.send_local_online_presence_to( + [test_case.presence_receiver_id], + ) ) - d = defer.ensureDeferred(d) if test_with_workers: # In order for the required presence_set_state replication request to occur between the @@ -897,7 +916,7 @@ def _test_sending_local_online_presence_to_local_user( ) test_case.assertEqual(len(presence_updates), 1) - presence_update: UserPresenceState = presence_updates[0] + presence_update = presence_updates[0] test_case.assertEqual(presence_update.user_id, test_case.presence_sender_id) test_case.assertEqual(presence_update.state, "online") @@ -908,7 +927,7 @@ def _test_sending_local_online_presence_to_local_user( ) test_case.assertEqual(len(presence_updates), 1) - presence_update: UserPresenceState = presence_updates[0] + presence_update = presence_updates[0] test_case.assertEqual(presence_update.user_id, test_case.presence_sender_id) test_case.assertEqual(presence_update.state, "online") @@ -936,12 +955,13 @@ def _test_sending_local_online_presence_to_local_user( test_case.assertEqual(len(presence_updates), 1) # Now trigger sending local online presence. - d = module_api_to_use.send_local_online_presence_to( - [ - test_case.presence_receiver_id, - ] + d = defer.ensureDeferred( + module_api_to_use.send_local_online_presence_to( + [ + test_case.presence_receiver_id, + ] + ) ) - d = defer.ensureDeferred(d) if test_with_workers: # In order for the required presence_set_state replication request to occur between the diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py index 7567756135b7..46df0102f77a 100644 --- a/tests/push/test_bulk_push_rule_evaluator.py +++ b/tests/push/test_bulk_push_rule_evaluator.py @@ -33,7 +33,6 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): - servlets = [ admin.register_servlets_for_client_rest_resource, room.register_servlets, @@ -131,7 +130,7 @@ def test_action_for_event_by_user_handles_noninteger_room_power_levels( # Create a new message event, and try to evaluate it under the dodgy # power level event. - event, context = self.get_success( + event, unpersisted_context = self.get_success( self.event_creation_handler.create_event( self.requester, { @@ -146,6 +145,7 @@ def test_action_for_event_by_user_handles_noninteger_room_power_levels( prev_event_ids=[pl_event_id], ) ) + context = self.get_success(unpersisted_context.persist(event)) bulk_evaluator = BulkPushRuleEvaluator(self.hs) # should not raise @@ -171,7 +171,7 @@ def test_action_for_event_by_user_disabled_by_config(self) -> None: """Ensure that push rules are not calculated when disabled in the config""" # Create a new message event which should cause a notification. - event, context = self.get_success( + event, unpersisted_context = self.get_success( self.event_creation_handler.create_event( self.requester, { @@ -185,6 +185,7 @@ def test_action_for_event_by_user_disabled_by_config(self) -> None: }, ) ) + context = self.get_success(unpersisted_context.persist(event)) bulk_evaluator = BulkPushRuleEvaluator(self.hs) # Mock the method which calculates push rules -- we do this instead of @@ -201,7 +202,7 @@ def _create_and_process( ) -> bool: """Returns true iff the `mentions` trigger an event push action.""" # Create a new message event which should cause a notification. 
- event, context = self.get_success( + event, unpersisted_context = self.get_success( self.event_creation_handler.create_event( self.requester, { @@ -212,7 +213,7 @@ def _create_and_process( }, ) ) - + context = self.get_success(unpersisted_context.persist(event)) # Execute the push rule machinery. self.get_success(bulk_evaluator.action_for_events_by_user([(event, context)])) @@ -377,7 +378,7 @@ def test_suppress_edits(self) -> None: bulk_evaluator = BulkPushRuleEvaluator(self.hs) # Create & persist an event to use as the parent of the relation. - event, context = self.get_success( + event, unpersisted_context = self.get_success( self.event_creation_handler.create_event( self.requester, { @@ -391,6 +392,7 @@ def test_suppress_edits(self) -> None: }, ) ) + context = self.get_success(unpersisted_context.persist(event)) self.get_success( self.event_creation_handler.handle_new_client_event( self.requester, events_and_context=[(event, context)] diff --git a/tests/push/test_email.py b/tests/push/test_email.py index ab8bb417e759..4ea5472eb475 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -23,6 +23,7 @@ import synapse.rest.admin from synapse.api.errors import Codes, SynapseError +from synapse.push.emailpusher import EmailPusher from synapse.rest.client import login, room from synapse.server import HomeServer from synapse.util import Clock @@ -38,7 +39,6 @@ class _User: class EmailPusherTests(HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, @@ -47,7 +47,6 @@ class EmailPusherTests(HomeserverTestCase): hijack_auth = False def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - config = self.default_config() config["email"] = { "enable_notifs": True, @@ -105,6 +104,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: user_tuple = self.get_success( self.hs.get_datastores().main.get_user_by_access_token(self.access_token) ) + assert user_tuple is not None self.token_id = user_tuple.token_id # We need to add email to account before we can create a pusher. 
@@ -114,7 +114,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: ) ) - self.pusher = self.get_success( + pusher = self.get_success( self.hs.get_pusherpool().add_or_update_pusher( user_id=self.user_id, access_token=self.token_id, @@ -127,6 +127,8 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: data={}, ) ) + assert isinstance(pusher, EmailPusher) + self.pusher = pusher self.auth_handler = hs.get_auth_handler() self.store = hs.get_datastores().main @@ -367,18 +369,19 @@ def test_no_email_sent_after_removed(self) -> None: # disassociate the user's email address self.get_success( - self.auth_handler.delete_threepid( - user_id=self.user_id, - medium="email", - address="a@example.com", + self.auth_handler.delete_local_threepid( + user_id=self.user_id, medium="email", address="a@example.com" ) ) # check that the pusher for that email address has been deleted - pushers = self.get_success( - self.hs.get_datastores().main.get_pushers_by({"user_name": self.user_id}) + pushers = list( + self.get_success( + self.hs.get_datastores().main.get_pushers_by( + {"user_name": self.user_id} + ) + ) ) - pushers = list(pushers) self.assertEqual(len(pushers), 0) def test_remove_unlinked_pushers_background_job(self) -> None: @@ -413,10 +416,13 @@ def test_remove_unlinked_pushers_background_job(self) -> None: self.wait_for_background_updates() # Check that all pushers with unlinked addresses were deleted - pushers = self.get_success( - self.hs.get_datastores().main.get_pushers_by({"user_name": self.user_id}) + pushers = list( + self.get_success( + self.hs.get_datastores().main.get_pushers_by( + {"user_name": self.user_id} + ) + ) ) - pushers = list(pushers) self.assertEqual(len(pushers), 0) def _check_for_mail(self) -> Tuple[Sequence, Dict]: @@ -428,10 +434,13 @@ def _check_for_mail(self) -> Tuple[Sequence, Dict]: that notification. 
""" # Get the stream ordering before it gets sent - pushers = self.get_success( - self.hs.get_datastores().main.get_pushers_by({"user_name": self.user_id}) + pushers = list( + self.get_success( + self.hs.get_datastores().main.get_pushers_by( + {"user_name": self.user_id} + ) + ) ) - pushers = list(pushers) self.assertEqual(len(pushers), 1) last_stream_ordering = pushers[0].last_stream_ordering @@ -439,10 +448,13 @@ def _check_for_mail(self) -> Tuple[Sequence, Dict]: self.pump(10) # It hasn't succeeded yet, so the stream ordering shouldn't have moved - pushers = self.get_success( - self.hs.get_datastores().main.get_pushers_by({"user_name": self.user_id}) + pushers = list( + self.get_success( + self.hs.get_datastores().main.get_pushers_by( + {"user_name": self.user_id} + ) + ) ) - pushers = list(pushers) self.assertEqual(len(pushers), 1) self.assertEqual(last_stream_ordering, pushers[0].last_stream_ordering) @@ -458,10 +470,13 @@ def _check_for_mail(self) -> Tuple[Sequence, Dict]: self.assertEqual(len(self.email_attempts), 1) # The stream ordering has increased - pushers = self.get_success( - self.hs.get_datastores().main.get_pushers_by({"user_name": self.user_id}) + pushers = list( + self.get_success( + self.hs.get_datastores().main.get_pushers_by( + {"user_name": self.user_id} + ) + ) ) - pushers = list(pushers) self.assertEqual(len(pushers), 1) self.assertTrue(pushers[0].last_stream_ordering > last_stream_ordering) diff --git a/tests/push/test_http.py b/tests/push/test_http.py index 1e4f97406a10..eb013f767057 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import List, Optional, Tuple +from typing import Any, List, Tuple from unittest.mock import Mock from twisted.internet.defer import Deferred @@ -22,7 +22,6 @@ from synapse.push import PusherConfig, PusherConfigException from synapse.rest.client import login, push_rule, pusher, receipts, room from synapse.server import HomeServer -from synapse.storage.databases.main.registration import TokenLookupResult from synapse.types import JsonDict from synapse.util import Clock @@ -67,9 +66,10 @@ def test_invalid_configuration(self) -> None: user_tuple = self.get_success( self.hs.get_datastores().main.get_user_by_access_token(access_token) ) + assert user_tuple is not None token_id = user_tuple.token_id - def test_data(data: Optional[JsonDict]) -> None: + def test_data(data: Any) -> None: self.get_failure( self.hs.get_pusherpool().add_or_update_pusher( user_id=user_id, @@ -113,6 +113,7 @@ def test_sends_http(self) -> None: user_tuple = self.get_success( self.hs.get_datastores().main.get_user_by_access_token(access_token) ) + assert user_tuple is not None token_id = user_tuple.token_id self.get_success( @@ -140,10 +141,11 @@ def test_sends_http(self) -> None: self.helper.send(room, body="There!", tok=other_access_token) # Get the stream ordering before it gets sent - pushers = self.get_success( - self.hs.get_datastores().main.get_pushers_by({"user_name": user_id}) + pushers = list( + self.get_success( + self.hs.get_datastores().main.get_pushers_by({"user_name": user_id}) + ) ) - pushers = list(pushers) self.assertEqual(len(pushers), 1) last_stream_ordering = pushers[0].last_stream_ordering @@ -151,10 +153,11 @@ def test_sends_http(self) -> None: self.pump() # It hasn't succeeded yet, so the stream ordering shouldn't have moved - pushers = self.get_success( - self.hs.get_datastores().main.get_pushers_by({"user_name": user_id}) + pushers = list( + self.get_success( + self.hs.get_datastores().main.get_pushers_by({"user_name": user_id}) + ) ) - pushers = list(pushers) self.assertEqual(len(pushers), 1) self.assertEqual(last_stream_ordering, pushers[0].last_stream_ordering) @@ -172,10 +175,11 @@ def test_sends_http(self) -> None: self.pump() # The stream ordering has increased - pushers = self.get_success( - self.hs.get_datastores().main.get_pushers_by({"user_name": user_id}) + pushers = list( + self.get_success( + self.hs.get_datastores().main.get_pushers_by({"user_name": user_id}) + ) ) - pushers = list(pushers) self.assertEqual(len(pushers), 1) self.assertTrue(pushers[0].last_stream_ordering > last_stream_ordering) last_stream_ordering = pushers[0].last_stream_ordering @@ -194,10 +198,11 @@ def test_sends_http(self) -> None: self.pump() # The stream ordering has increased, again - pushers = self.get_success( - self.hs.get_datastores().main.get_pushers_by({"user_name": user_id}) + pushers = list( + self.get_success( + self.hs.get_datastores().main.get_pushers_by({"user_name": user_id}) + ) ) - pushers = list(pushers) self.assertEqual(len(pushers), 1) self.assertTrue(pushers[0].last_stream_ordering > last_stream_ordering) @@ -229,6 +234,7 @@ def test_sends_high_priority_for_encrypted(self) -> None: user_tuple = self.get_success( self.hs.get_datastores().main.get_user_by_access_token(access_token) ) + assert user_tuple is not None token_id = user_tuple.token_id self.get_success( @@ -349,6 +355,7 @@ def test_sends_high_priority_for_one_to_one_only(self) -> None: user_tuple = self.get_success( self.hs.get_datastores().main.get_user_by_access_token(access_token) ) + assert user_tuple is not None 
token_id = user_tuple.token_id self.get_success( @@ -436,6 +443,7 @@ def test_sends_high_priority_for_mention(self) -> None: user_tuple = self.get_success( self.hs.get_datastores().main.get_user_by_access_token(access_token) ) + assert user_tuple is not None token_id = user_tuple.token_id self.get_success( @@ -514,6 +522,7 @@ def test_sends_high_priority_for_atroom(self) -> None: user_tuple = self.get_success( self.hs.get_datastores().main.get_user_by_access_token(access_token) ) + assert user_tuple is not None token_id = user_tuple.token_id self.get_success( @@ -621,6 +630,7 @@ def _test_push_unread_count(self) -> None: user_tuple = self.get_success( self.hs.get_datastores().main.get_user_by_access_token(access_token) ) + assert user_tuple is not None token_id = user_tuple.token_id self.get_success( @@ -756,6 +766,7 @@ def _set_pusher(self, user_id: str, access_token: str, enabled: bool) -> None: user_tuple = self.get_success( self.hs.get_datastores().main.get_user_by_access_token(access_token) ) + assert user_tuple is not None token_id = user_tuple.token_id self.get_success( @@ -898,6 +909,7 @@ def test_update_different_device_access_token_device_id(self) -> None: user_tuple = self.get_success( self.hs.get_datastores().main.get_user_by_access_token(access_token) ) + assert user_tuple is not None token_id = user_tuple.token_id device_id = user_tuple.device_id @@ -944,9 +956,10 @@ def test_device_id(self) -> None: ) # Look up the user info for the access token so we can compare the device ID. - lookup_result: TokenLookupResult = self.get_success( + lookup_result = self.get_success( self.hs.get_datastores().main.get_user_by_access_token(access_token) ) + assert lookup_result is not None # Get the user's devices and check it has the correct device ID. channel = self.make_request("GET", "/pushers", access_token=access_token) diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index de1c4e19eea0..331de20faf04 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Dict, List, Optional, Set, Union, cast +from typing import Any, Dict, List, Optional, Union, cast import frozendict @@ -32,6 +32,7 @@ from synapse.synapse_rust.push import FilteredPushRules, PushRuleEvaluator, PushRules from synapse.types import JsonDict, JsonMapping, UserID from synapse.util import Clock +from synapse.util.frozenutils import freeze from tests import unittest from tests.test_utils.event_injection import create_event, inject_member_event @@ -48,17 +49,30 @@ def test_nested(self) -> None: input = {"foo": {"bar": "abc"}} self.assertEqual({"foo.bar": "abc"}, _flatten_dict(input)) + # If a field has a dot in it, escape it. 
+ input = {"m.foo": {"b\\ar": "abc"}} + self.assertEqual({"m\\.foo.b\\\\ar": "abc"}, _flatten_dict(input)) + def test_non_string(self) -> None: - """Non-string items are dropped.""" + """String, booleans, ints, nulls and list of those should be kept while other items are dropped.""" input: Dict[str, Any] = { "woo": "woo", "foo": True, "bar": 1, "baz": None, - "fuzz": [], + "fuzz": ["woo", True, 1, None, [], {}], "boo": {}, } - self.assertEqual({"woo": "woo", "foo": "true"}, _flatten_dict(input)) + self.assertEqual( + { + "woo": "woo", + "foo": True, + "bar": 1, + "baz": None, + "fuzz": ["woo", True, 1, None], + }, + _flatten_dict(input), + ) def test_event(self) -> None: """Events can also be flattened.""" @@ -78,9 +92,9 @@ def test_event(self) -> None: ) expected = { "content.msgtype": "m.text", - "content.body": "hello world!", + "content.body": "Hello world!", "content.format": "org.matrix.custom.html", - "content.formatted_body": "
<h1>hello world!</h1>", + "content.formatted_body": "<h1>Hello world!</h1>
", "room_id": "!test:test", "sender": "@alice:test", "type": "m.room.message", @@ -107,6 +121,7 @@ def test_extensible_events(self) -> None: "room_id": "!test:test", "sender": "@alice:test", "type": "m.room.message", + "content.org\\.matrix\\.msc1767\\.markup": [], } self.assertEqual(expected, _flatten_dict(event)) @@ -118,6 +133,7 @@ def test_extensible_events(self) -> None: "room_id": "!test:test", "sender": "@alice:test", "type": "m.room.message", + "content.org\\.matrix\\.msc1767\\.markup": [], } self.assertEqual(expected, _flatten_dict(event)) @@ -127,9 +143,6 @@ def _get_evaluator( self, content: JsonMapping, *, - has_mentions: bool = False, - user_mentions: Optional[Set[str]] = None, - room_mention: bool = False, related_events: Optional[JsonDict] = None, ) -> PushRuleEvaluator: event = FrozenEvent( @@ -148,9 +161,7 @@ def _get_evaluator( power_levels: Dict[str, Union[int, Dict[str, int]]] = {} return PushRuleEvaluator( _flatten_dict(event), - has_mentions, - user_mentions or set(), - room_mention, + False, room_member_count, sender_power_level, cast(Dict[str, int], power_levels.get("notifications", {})), @@ -184,53 +195,6 @@ def test_display_name(self) -> None: # A display name with spaces should work fine. self.assertTrue(evaluator.matches(condition, "@user:test", "foo bar")) - def test_user_mentions(self) -> None: - """Check for user mentions.""" - condition = {"kind": "org.matrix.msc3952.is_user_mention"} - - # No mentions shouldn't match. - evaluator = self._get_evaluator({}, has_mentions=True) - self.assertFalse(evaluator.matches(condition, "@user:test", None)) - - # An empty set shouldn't match - evaluator = self._get_evaluator({}, has_mentions=True, user_mentions=set()) - self.assertFalse(evaluator.matches(condition, "@user:test", None)) - - # The Matrix ID appearing anywhere in the mentions list should match - evaluator = self._get_evaluator( - {}, has_mentions=True, user_mentions={"@user:test"} - ) - self.assertTrue(evaluator.matches(condition, "@user:test", None)) - - evaluator = self._get_evaluator( - {}, has_mentions=True, user_mentions={"@another:test", "@user:test"} - ) - self.assertTrue(evaluator.matches(condition, "@user:test", None)) - - # Note that invalid data is tested at tests.push.test_bulk_push_rule_evaluator.TestBulkPushRuleEvaluator.test_mentions - # since the BulkPushRuleEvaluator is what handles data sanitisation. - - def test_room_mentions(self) -> None: - """Check for room mentions.""" - condition = {"kind": "org.matrix.msc3952.is_room_mention"} - - # No room mention shouldn't match. - evaluator = self._get_evaluator({}, has_mentions=True) - self.assertFalse(evaluator.matches(condition, None, None)) - - # Room mention should match. - evaluator = self._get_evaluator({}, has_mentions=True, room_mention=True) - self.assertTrue(evaluator.matches(condition, None, None)) - - # A room mention and user mention is valid. - evaluator = self._get_evaluator( - {}, has_mentions=True, user_mentions={"@another:test"}, room_mention=True - ) - self.assertTrue(evaluator.matches(condition, None, None)) - - # Note that invalid data is tested at tests.push.test_bulk_push_rule_evaluator.TestBulkPushRuleEvaluator.test_mentions - # since the BulkPushRuleEvaluator is what handles data sanitisation. 
- def _assert_matches( self, condition: JsonDict, content: JsonMapping, msg: Optional[str] = None ) -> None: @@ -402,6 +366,193 @@ def test_event_match_non_body(self) -> None: "pattern should not match before a newline", ) + def test_event_match_pattern(self) -> None: + """Check that event_match conditions do not use a "pattern_type" from user data.""" + + # The pattern_type should not be deserialized into anything valid. + condition = { + "kind": "event_match", + "key": "content.value", + "pattern_type": "user_id", + } + self._assert_not_matches( + condition, + {"value": "@user:test"}, + "should not be possible to pass a pattern_type in", + ) + + # This is an internal-only condition which shouldn't get deserialized. + condition = { + "kind": "event_match_type", + "key": "content.value", + "pattern_type": "user_id", + } + self._assert_not_matches( + condition, + {"value": "@user:test"}, + "should not be possible to pass a pattern_type in", + ) + + def test_exact_event_match_string(self) -> None: + """Check that exact_event_match conditions work as expected for strings.""" + + # Test against a string value. + condition = { + "kind": "event_property_is", + "key": "content.value", + "value": "foobaz", + } + self._assert_matches( + condition, + {"value": "foobaz"}, + "exact value should match", + ) + self._assert_not_matches( + condition, + {"value": "FoobaZ"}, + "values should match and be case-sensitive", + ) + self._assert_not_matches( + condition, + {"value": "test foobaz test"}, + "values must exactly match", + ) + value: Any + for value in (True, False, 1, 1.1, None, [], {}): + self._assert_not_matches( + condition, + {"value": value}, + "incorrect types should not match", + ) + + # it should work on frozendicts too + self._assert_matches( + condition, + frozendict.frozendict({"value": "foobaz"}), + "values should match on frozendicts", + ) + + def test_exact_event_match_boolean(self) -> None: + """Check that exact_event_match conditions work as expected for booleans.""" + + # Test against a True boolean value. + condition = {"kind": "event_property_is", "key": "content.value", "value": True} + self._assert_matches( + condition, + {"value": True}, + "exact value should match", + ) + self._assert_not_matches( + condition, + {"value": False}, + "incorrect values should not match", + ) + for value in ("foobaz", 1, 1.1, None, [], {}): + self._assert_not_matches( + condition, + {"value": value}, + "incorrect types should not match", + ) + + # Test against a False boolean value. + condition = { + "kind": "event_property_is", + "key": "content.value", + "value": False, + } + self._assert_matches( + condition, + {"value": False}, + "exact value should match", + ) + self._assert_not_matches( + condition, + {"value": True}, + "incorrect values should not match", + ) + # Choose false-y values to ensure there's no type coercion. 
+ for value in ("", 0, 1.1, None, [], {}): + self._assert_not_matches( + condition, + {"value": value}, + "incorrect types should not match", + ) + + def test_exact_event_match_null(self) -> None: + """Check that exact_event_match conditions work as expected for null.""" + + condition = {"kind": "event_property_is", "key": "content.value", "value": None} + self._assert_matches( + condition, + {"value": None}, + "exact value should match", + ) + for value in ("foobaz", True, False, 1, 1.1, [], {}): + self._assert_not_matches( + condition, + {"value": value}, + "incorrect types should not match", + ) + + def test_exact_event_match_integer(self) -> None: + """Check that exact_event_match conditions work as expected for integers.""" + + condition = {"kind": "event_property_is", "key": "content.value", "value": 1} + self._assert_matches( + condition, + {"value": 1}, + "exact value should match", + ) + value: Any + for value in (1.1, -1, 0): + self._assert_not_matches( + condition, + {"value": value}, + "incorrect values should not match", + ) + for value in ("1", True, False, None, [], {}): + self._assert_not_matches( + condition, + {"value": value}, + "incorrect types should not match", + ) + + def test_exact_event_property_contains(self) -> None: + """Check that exact_event_property_contains conditions work as expected.""" + + condition = { + "kind": "event_property_contains", + "key": "content.value", + "value": "foobaz", + } + self._assert_matches( + condition, + {"value": ["foobaz"]}, + "exact value should match", + ) + self._assert_matches( + condition, + {"value": ["foobaz", "bugz"]}, + "extra values should match", + ) + self._assert_not_matches( + condition, + {"value": ["FoobaZ"]}, + "values should match and be case-sensitive", + ) + self._assert_not_matches( + condition, + {"value": "foobaz"}, + "does not search in a string", + ) + + # it should work on frozendicts too + self._assert_matches( + condition, + freeze({"value": ["foobaz"]}), + "values should match on frozendicts", + ) + def test_no_body(self) -> None: """Not having a body shouldn't break the evaluator.""" evaluator = self._get_evaluator({}) @@ -837,8 +988,6 @@ def test_reactions(self) -> None: dm_evaluator = PushRuleEvaluator( _flatten_dict(reaction_event), False, - set(), - False, 2, 0, {}, @@ -850,21 +999,23 @@ def test_reactions(self) -> None: # Reaction to Brad's message, should be an action for Brad actions = dm_evaluator.run( - FilteredPushRules(PushRules([]), {}, True, True, True, True, True), "@brad:beeper.com", "Brad" + FilteredPushRules(PushRules([]), {}, True, True, True, True, True), + "@brad:beeper.com", + "Brad", ) self.assertTrue("notify" in actions) # Reaction to Brad's message, should not be an action for Nick actions = dm_evaluator.run( - FilteredPushRules(PushRules([]), {}, True, True, True, True, True), "@nick:beeper.com", "Nick" + FilteredPushRules(PushRules([]), {}, True, True, True, True, True), + "@nick:beeper.com", + "Nick", ) self.assertEqual(actions, []) large_room_evaluator = PushRuleEvaluator( _flatten_dict(reaction_event), False, - set(), - False, 30, 0, {}, @@ -876,11 +1027,15 @@ def test_reactions(self) -> None: # Large rooms should never have emoji reaction notifications actions = large_room_evaluator.run( - FilteredPushRules(PushRules([]), {}, True, True, True, True, True), "@brad:beeper.com", "Brad" + FilteredPushRules(PushRules([]), {}, True, True, True, True, True), + "@brad:beeper.com", + "Brad", ) self.assertEqual(actions, []) actions = large_room_evaluator.run( - 
FilteredPushRules(PushRules([]), {}, True, True, True, True, True), "@nick:beeper.com", "Nick" + FilteredPushRules(PushRules([]), {}, True, True, True, True, True), + "@nick:beeper.com", + "Nick", ) self.assertEqual(actions, []) @@ -905,8 +1060,6 @@ def test_supress_auto_accept_invite(self) -> None: evaluator = PushRuleEvaluator( _flatten_dict(event), False, - set(), - False, 0, 0, {}, diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index f4df28b193a8..5d24eea0ac6c 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -64,7 +64,6 @@ def unpatch() -> None: class EventsWorkerStoreTestCase(BaseSlavedStoreTestCase): - STORE_TYPE = EventsWorkerStore def setUp(self) -> None: diff --git a/tests/replication/tcp/streams/test_events.py b/tests/replication/tcp/streams/test_events.py index 043dbe76afb0..65ef4bb16055 100644 --- a/tests/replication/tcp/streams/test_events.py +++ b/tests/replication/tcp/streams/test_events.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, List, Optional +from typing import Any, List, Optional, Sequence from twisted.test.proto_helpers import MemoryReactor @@ -139,7 +139,7 @@ def test_update_function_huge_state_change(self) -> None: ) # this is the point in the DAG where we make a fork - fork_point: List[str] = self.get_success( + fork_point: Sequence[str] = self.get_success( self.hs.get_datastores().main.get_latest_event_ids_in_room(self.room_id) ) @@ -168,7 +168,7 @@ def test_update_function_huge_state_change(self) -> None: pl_event = self.get_success( inject_event( self.hs, - prev_event_ids=prev_events, + prev_event_ids=list(prev_events), type=EventTypes.PowerLevels, state_key="", sender=self.user_id, @@ -294,7 +294,7 @@ def test_update_function_state_row_limit(self) -> None: ) # this is the point in the DAG where we make a fork - fork_point: List[str] = self.get_success( + fork_point: Sequence[str] = self.get_success( self.hs.get_datastores().main.get_latest_event_ids_in_room(self.room_id) ) @@ -323,7 +323,7 @@ def test_update_function_state_row_limit(self) -> None: e = self.get_success( inject_event( self.hs, - prev_event_ids=prev_events, + prev_event_ids=list(prev_events), type=EventTypes.PowerLevels, state_key="", sender=self.user_id, diff --git a/tests/replication/tcp/streams/test_partial_state.py b/tests/replication/tcp/streams/test_partial_state.py index 38b5020ce0e1..452ac8506996 100644 --- a/tests/replication/tcp/streams/test_partial_state.py +++ b/tests/replication/tcp/streams/test_partial_state.py @@ -37,7 +37,7 @@ def test_un_partial_stated_room_unblocks_over_replication(self) -> None: room_id = self.helper.create_room_as("@bob:test") # Mark the room as partial-stated. self.get_success( - self.store.store_partial_state_room(room_id, ["serv1", "serv2"], 0, "serv1") + self.store.store_partial_state_room(room_id, {"serv1", "serv2"}, 0, "serv1") ) worker = self.make_worker_hs("synapse.app.generic_worker") diff --git a/tests/replication/tcp/streams/test_typing.py b/tests/replication/tcp/streams/test_typing.py index 68de5d1cc280..5a38ac831f26 100644 --- a/tests/replication/tcp/streams/test_typing.py +++ b/tests/replication/tcp/streams/test_typing.py @@ -13,7 +13,7 @@ # limitations under the License. 
from unittest.mock import Mock -from synapse.handlers.typing import RoomMember +from synapse.handlers.typing import RoomMember, TypingWriterHandler from synapse.replication.tcp.streams import TypingStream from synapse.util.caches.stream_change_cache import StreamChangeCache @@ -33,6 +33,7 @@ def _build_replication_data_handler(self) -> Mock: def test_typing(self) -> None: typing = self.hs.get_typing_handler() + assert isinstance(typing, TypingWriterHandler) self.reconnect() @@ -88,6 +89,7 @@ def test_reset(self) -> None: sends the proper position and RDATA). """ typing = self.hs.get_typing_handler() + assert isinstance(typing, TypingWriterHandler) self.reconnect() diff --git a/tests/replication/tcp/test_handler.py b/tests/replication/tcp/test_handler.py index 6e4055cc2102..bab77b2df787 100644 --- a/tests/replication/tcp/test_handler.py +++ b/tests/replication/tcp/test_handler.py @@ -127,6 +127,7 @@ def test_wait_for_stream_position(self) -> None: # ... updating the cache ID gen on the master still shouldn't cause the # deferred to wake up. + assert store._cache_id_gen is not None ctx = store._cache_id_gen.get_next() self.get_success(ctx.__aenter__()) self.get_success(ctx.__aexit__(None, None, None)) @@ -140,3 +141,64 @@ def test_wait_for_stream_position(self) -> None: self.get_success(ctx_worker1.__aexit__(None, None, None)) self.assertTrue(d.called) + + def test_wait_for_stream_position_rdata(self) -> None: + """Check that wait for stream position correctly waits for an update + from the correct instance, when RDATA is sent. + """ + store = self.hs.get_datastores().main + cmd_handler = self.hs.get_replication_command_handler() + data_handler = self.hs.get_replication_data_handler() + + worker1 = self.make_worker_hs( + "synapse.app.generic_worker", + extra_config={ + "worker_name": "worker1", + "run_background_tasks_on": "worker1", + "redis": {"enabled": True}, + }, + ) + + cache_id_gen = worker1.get_datastores().main._cache_id_gen + assert cache_id_gen is not None + + self.replicate() + + # First, make sure the master knows that `worker1` exists. + initial_token = cache_id_gen.get_current_token() + cmd_handler.send_command( + PositionCommand("caches", "worker1", initial_token, initial_token) + ) + self.replicate() + + # `wait_for_stream_position` should only return once master receives a + # notification that `next_token2` has persisted. + ctx_worker1 = cache_id_gen.get_next_mult(2) + next_token1, next_token2 = self.get_success(ctx_worker1.__aenter__()) + + d = defer.ensureDeferred( + data_handler.wait_for_stream_position("worker1", "caches", next_token2) + ) + self.assertFalse(d.called) + + # Insert an entry into the cache stream with token `next_token1`, but + # not `next_token2`. + self.get_success( + store.db_pool.simple_insert( + table="cache_invalidation_stream_by_instance", + values={ + "stream_id": next_token1, + "instance_name": "worker1", + "cache_func": "foo", + "keys": [], + "invalidation_ts": 0, + }, + ) + ) + + # Finish the context manager, triggering the data to be sent to master. + self.get_success(ctx_worker1.__aexit__(None, None, None)) + + # Master should get told about `next_token2`, so the deferred should + # resolve. 
+ self.assertTrue(d.called) diff --git a/tests/replication/test_federation_sender_shard.py b/tests/replication/test_federation_sender_shard.py index 89380e25b59a..08703206a9c9 100644 --- a/tests/replication/test_federation_sender_shard.py +++ b/tests/replication/test_federation_sender_shard.py @@ -16,6 +16,7 @@ from synapse.api.constants import EventTypes, Membership from synapse.events.builder import EventBuilderFactory +from synapse.handlers.typing import TypingWriterHandler from synapse.rest.admin import register_servlets_for_client_rest_resource from synapse.rest.client import login, room from synapse.types import UserID, create_requester @@ -174,6 +175,7 @@ def test_send_typing_sharded(self) -> None: token = self.login("user3", "pass") typing_handler = self.hs.get_typing_handler() + assert isinstance(typing_handler, TypingWriterHandler) sent_on_1 = False sent_on_2 = False diff --git a/tests/replication/test_pusher_shard.py b/tests/replication/test_pusher_shard.py index 9345cfbeb266..0798b021c3d0 100644 --- a/tests/replication/test_pusher_shard.py +++ b/tests/replication/test_pusher_shard.py @@ -50,6 +50,7 @@ def _create_pusher_and_send_msg(self, localpart: str) -> str: user_dict = self.get_success( self.hs.get_datastores().main.get_user_by_access_token(access_token) ) + assert user_dict is not None token_id = user_dict.token_id self.get_success( diff --git a/tests/rest/admin/test_device.py b/tests/rest/admin/test_device.py index 03f2112b07ce..aaa488bced1e 100644 --- a/tests/rest/admin/test_device.py +++ b/tests/rest/admin/test_device.py @@ -28,7 +28,6 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, @@ -291,7 +290,6 @@ def test_delete_device(self) -> None: class DevicesRestTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, @@ -415,7 +413,6 @@ def test_get_devices(self) -> None: class DeleteDevicesRestTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, diff --git a/tests/rest/admin/test_event_reports.py b/tests/rest/admin/test_event_reports.py index 233eba351690..f189b0776910 100644 --- a/tests/rest/admin/test_event_reports.py +++ b/tests/rest/admin/test_event_reports.py @@ -78,7 +78,7 @@ def test_no_auth(self) -> None: """ Try to get an event report without authentication. """ - channel = self.make_request("GET", self.url, b"{}") + channel = self.make_request("GET", self.url, {}) self.assertEqual(401, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) @@ -473,7 +473,7 @@ def test_no_auth(self) -> None: """ Try to get event report without authentication. 
""" - channel = self.make_request("GET", self.url, b"{}") + channel = self.make_request("GET", self.url, {}) self.assertEqual(401, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) @@ -599,3 +599,142 @@ def _check_fields(self, content: JsonDict) -> None: self.assertIn("room_id", content["event_json"]) self.assertIn("sender", content["event_json"]) self.assertIn("content", content["event_json"]) + + +class DeleteEventReportTestCase(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self._store = hs.get_datastores().main + + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + self.other_user = self.register_user("user", "pass") + self.other_user_tok = self.login("user", "pass") + + # create report + event_id = self.get_success( + self._store.add_event_report( + "room_id", + "event_id", + self.other_user, + "this makes me sad", + {}, + self.clock.time_msec(), + ) + ) + + self.url = f"/_synapse/admin/v1/event_reports/{event_id}" + + def test_no_auth(self) -> None: + """ + Try to delete event report without authentication. + """ + channel = self.make_request("DELETE", self.url) + + self.assertEqual(401, channel.code, msg=channel.json_body) + self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) + + def test_requester_is_no_admin(self) -> None: + """ + If the user is not a server admin, an error 403 is returned. + """ + + channel = self.make_request( + "DELETE", + self.url, + access_token=self.other_user_tok, + ) + + self.assertEqual(403, channel.code, msg=channel.json_body) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + def test_delete_success(self) -> None: + """ + Testing delete a report. + """ + + channel = self.make_request( + "DELETE", + self.url, + access_token=self.admin_user_tok, + ) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual({}, channel.json_body) + + channel = self.make_request( + "GET", + self.url, + access_token=self.admin_user_tok, + ) + + # check that report was deleted + self.assertEqual(404, channel.code, msg=channel.json_body) + self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) + + def test_invalid_report_id(self) -> None: + """ + Testing that an invalid `report_id` returns a 400. 
+ """ + + # `report_id` is negative + channel = self.make_request( + "DELETE", + "/_synapse/admin/v1/event_reports/-123", + access_token=self.admin_user_tok, + ) + + self.assertEqual(400, channel.code, msg=channel.json_body) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + self.assertEqual( + "The report_id parameter must be a string representing a positive integer.", + channel.json_body["error"], + ) + + # `report_id` is a non-numerical string + channel = self.make_request( + "DELETE", + "/_synapse/admin/v1/event_reports/abcdef", + access_token=self.admin_user_tok, + ) + + self.assertEqual(400, channel.code, msg=channel.json_body) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + self.assertEqual( + "The report_id parameter must be a string representing a positive integer.", + channel.json_body["error"], + ) + + # `report_id` is undefined + channel = self.make_request( + "DELETE", + "/_synapse/admin/v1/event_reports/", + access_token=self.admin_user_tok, + ) + + self.assertEqual(400, channel.code, msg=channel.json_body) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + self.assertEqual( + "The report_id parameter must be a string representing a positive integer.", + channel.json_body["error"], + ) + + def test_report_id_not_found(self) -> None: + """ + Testing that a not existing `report_id` returns a 404. + """ + + channel = self.make_request( + "DELETE", + "/_synapse/admin/v1/event_reports/123", + access_token=self.admin_user_tok, + ) + + self.assertEqual(404, channel.code, msg=channel.json_body) + self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) + self.assertEqual("Event report not found", channel.json_body["error"]) diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py index aadb31ca838e..6d04911d6740 100644 --- a/tests/rest/admin/test_media.py +++ b/tests/rest/admin/test_media.py @@ -20,8 +20,8 @@ import synapse.rest.admin from synapse.api.errors import Codes +from synapse.media.filepath import MediaFilePaths from synapse.rest.client import login, profile, room -from synapse.rest.media.v1.filepath import MediaFilePaths from synapse.server import HomeServer from synapse.util import Clock @@ -34,7 +34,6 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, synapse.rest.admin.register_servlets_for_media_repo, @@ -196,7 +195,6 @@ def test_delete_media(self) -> None: class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, synapse.rest.admin.register_servlets_for_media_repo, @@ -213,7 +211,8 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.admin_user_tok = self.login("admin", "pass") self.filepaths = MediaFilePaths(hs.config.media.media_store_path) - self.url = "/_synapse/admin/v1/media/%s/delete" % self.server_name + self.url = "/_synapse/admin/v1/media/delete" + self.legacy_url = "/_synapse/admin/v1/media/%s/delete" % self.server_name # Move clock up to somewhat realistic time self.reactor.advance(1000000000) @@ -332,11 +331,13 @@ def test_invalid_parameter(self) -> None: channel.json_body["error"], ) - def test_delete_media_never_accessed(self) -> None: + @parameterized.expand([(True,), (False,)]) + def test_delete_media_never_accessed(self, use_legacy_url: bool) -> None: """ Tests that media deleted if it is older than `before_ts` and never accessed `last_access_ts` is `NULL` and `created_ts` < `before_ts` """ + url = 
self.legacy_url if use_legacy_url else self.url # upload and do not access server_and_media_id = self._create_media() @@ -351,7 +352,7 @@ def test_delete_media_never_accessed(self) -> None: now_ms = self.clock.time_msec() channel = self.make_request( "POST", - self.url + "?before_ts=" + str(now_ms), + url + "?before_ts=" + str(now_ms), access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) @@ -591,7 +592,6 @@ def _access_media( class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, synapse.rest.admin.register_servlets_for_media_repo, @@ -721,7 +721,6 @@ def test_quarantine_protected_media(self) -> None: class ProtectMediaByIDTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, synapse.rest.admin.register_servlets_for_media_repo, @@ -818,7 +817,6 @@ def test_protect_media(self) -> None: class PurgeMediaCacheTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, synapse.rest.admin.register_servlets_for_media_repo, diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 453a6e979c02..eb50086c508e 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -402,6 +402,21 @@ def test_shutdown_room_block_peek(self) -> None: # Assert we can no longer peek into the room self._assert_peek(self.room_id, expect_code=403) + def test_room_delete_send(self) -> None: + """Test that sending into a deleted room returns a 403""" + channel = self.make_request( + "DELETE", + self.url, + content={}, + access_token=self.admin_user_tok, + ) + + self.assertEqual(200, channel.code, msg=channel.json_body) + + self.helper.send( + self.room_id, "test message", expect_code=403, tok=self.other_user_tok + ) + def _is_blocked(self, room_id: str, expect: bool = True) -> None: """Assert that the room is blocked or not""" d = self.store.is_room_blocked(room_id) @@ -1990,7 +2005,6 @@ def test_room_messages_purge(self) -> None: class JoinAliasRoomTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, room.register_servlets, diff --git a/tests/rest/admin/test_server_notice.py b/tests/rest/admin/test_server_notice.py index a2f347f666e8..28b999573e75 100644 --- a/tests/rest/admin/test_server_notice.py +++ b/tests/rest/admin/test_server_notice.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import List +from typing import List, Sequence from twisted.test.proto_helpers import MemoryReactor @@ -28,7 +28,6 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, @@ -558,7 +557,7 @@ def test_update_notice_user_avatar_when_changed(self) -> None: def _check_invite_and_join_status( self, user_id: str, expected_invites: int, expected_memberships: int - ) -> List[RoomsForUser]: + ) -> Sequence[RoomsForUser]: """Check invite and room membership status of a user. 
Args diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 5c1ced355ff1..4b8f889a71ba 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -28,8 +28,8 @@ from synapse.api.constants import ApprovalNoticeMedium, LoginType, UserTypes from synapse.api.errors import Codes, HttpResponseException, ResourceLimitError from synapse.api.room_versions import RoomVersions +from synapse.media.filepath import MediaFilePaths from synapse.rest.client import devices, login, logout, profile, register, room, sync -from synapse.rest.media.v1.filepath import MediaFilePaths from synapse.server import HomeServer from synapse.types import JsonDict, UserID, create_requester from synapse.util import Clock @@ -2913,7 +2913,8 @@ def test_get_rooms_with_nonlocal_user(self) -> None: other_user_tok = self.login("user", "pass") event_builder_factory = self.hs.get_event_builder_factory() event_creation_handler = self.hs.get_event_creation_handler() - storage_controllers = self.hs.get_storage_controllers() + persistence = self.hs.get_storage_controllers().persistence + assert persistence is not None # Create two rooms, one with a local user only and one with both a local # and remote user. @@ -2934,11 +2935,13 @@ def test_get_rooms_with_nonlocal_user(self) -> None: }, ) - event, context = self.get_success( + event, unpersisted_context = self.get_success( event_creation_handler.create_new_client_event(builder) ) - self.get_success(storage_controllers.persistence.persist_event(event, context)) + context = self.get_success(unpersisted_context.persist(event)) + + self.get_success(persistence.persist_event(event, context)) # Now get rooms url = "/_synapse/admin/v1/users/@joiner:remote_hs/joined_rooms" diff --git a/tests/rest/admin/test_username_available.py b/tests/rest/admin/test_username_available.py index 30f12f1bffc7..6c04e6c56cc2 100644 --- a/tests/rest/admin/test_username_available.py +++ b/tests/rest/admin/test_username_available.py @@ -11,6 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import Optional + from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin @@ -33,9 +35,14 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") - async def check_username(username: str) -> bool: - if username == "allowed": - return True + async def check_username( + localpart: str, + guest_access_token: Optional[str] = None, + assigned_user_id: Optional[str] = None, + inhibit_user_in_use_error: bool = False, + ) -> None: + if localpart == "allowed": + return raise SynapseError( 400, "User ID already taken.", @@ -43,7 +50,7 @@ async def check_username(username: str) -> bool: ) handler = self.hs.get_registration_handler() - handler.check_username = check_username + handler.check_username = check_username # type: ignore[assignment] def test_username_available(self) -> None: """ diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index 88f255c9eea9..7f675c44a278 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py @@ -40,7 +40,6 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): - servlets = [ account.register_servlets, synapse.rest.admin.register_servlets_for_client_rest_resource, @@ -408,7 +407,6 @@ def _reset_password( class DeactivateTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, login.register_servlets, @@ -492,7 +490,6 @@ def deactivate(self, user_id: str, tok: str) -> None: class WhoamiTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, login.register_servlets, @@ -567,7 +564,6 @@ def _whoami(self, tok: str) -> JsonDict: class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): - servlets = [ account.register_servlets, login.register_servlets, @@ -1193,7 +1189,7 @@ async def post_json( return {} # Register a mock that will return the expected result depending on the remote. - self.hs.get_federation_http_client().post_json = Mock(side_effect=post_json) + self.hs.get_federation_http_client().post_json = Mock(side_effect=post_json) # type: ignore[assignment] # Check that we've got the correct response from the client-side endpoint. self._test_status( @@ -1253,9 +1249,8 @@ async def is_expired(user_id: str) -> bool: # account status will fail. 
return UserID.from_string(user_id).localpart == "someuser" - self.hs.get_account_validity_handler()._is_user_expired_callbacks.append( - is_expired - ) + account_validity_callbacks = self.hs.get_module_api_callbacks().account_validity + account_validity_callbacks.is_user_expired_callbacks.append(is_expired) self._test_status( users=[user], diff --git a/tests/rest/client/test_auth.py b/tests/rest/client/test_auth.py index 208ec4482970..0d8fe77b884f 100644 --- a/tests/rest/client/test_auth.py +++ b/tests/rest/client/test_auth.py @@ -34,7 +34,7 @@ from tests import unittest from tests.handlers.test_oidc import HAS_OIDC from tests.rest.client.utils import TEST_OIDC_CONFIG, TEST_OIDC_ISSUER -from tests.server import FakeChannel, make_request +from tests.server import FakeChannel from tests.unittest import override_config, skip_unless @@ -43,13 +43,15 @@ def __init__(self, hs: HomeServer) -> None: super().__init__(hs) self.recaptcha_attempts: List[Tuple[dict, str]] = [] + def is_enabled(self) -> bool: + return True + def check_auth(self, authdict: dict, clientip: str) -> Any: self.recaptcha_attempts.append((authdict, clientip)) return succeed(True) class FallbackAuthTests(unittest.HomeserverTestCase): - servlets = [ auth.register_servlets, register.register_servlets, @@ -57,7 +59,6 @@ class FallbackAuthTests(unittest.HomeserverTestCase): hijack_auth = False def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - config = self.default_config() config["enable_registration_captcha"] = True @@ -1319,16 +1320,8 @@ def test_logout_during_login(self) -> None: channel = self.submit_logout_token(logout_token) self.assertEqual(channel.code, 200) - # Now try to exchange the login token - channel = make_request( - self.hs.get_reactor(), - self.site, - "POST", - "/login", - content={"type": "m.login.token", "token": login_token}, - ) - # It should have failed - self.assertEqual(channel.code, 403) + # Now try to exchange the login token, it should fail. 
+ self.helper.login_via_token(login_token, 403) @override_config( { diff --git a/tests/rest/client/test_capabilities.py b/tests/rest/client/test_capabilities.py index d1751e1557f7..c16e8d43f419 100644 --- a/tests/rest/client/test_capabilities.py +++ b/tests/rest/client/test_capabilities.py @@ -26,7 +26,6 @@ class CapabilitiesTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, capabilities.register_servlets, diff --git a/tests/rest/client/test_consent.py b/tests/rest/client/test_consent.py index b1ca81a91191..bb845179d3f8 100644 --- a/tests/rest/client/test_consent.py +++ b/tests/rest/client/test_consent.py @@ -38,7 +38,6 @@ class ConsentResourceTestCase(unittest.HomeserverTestCase): hijack_auth = False def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - config = self.default_config() config["form_secret"] = "123abc" diff --git a/tests/rest/client/test_directory.py b/tests/rest/client/test_directory.py index 7a88aa2cda7f..6490e883bf28 100644 --- a/tests/rest/client/test_directory.py +++ b/tests/rest/client/test_directory.py @@ -28,7 +28,6 @@ class DirectoryTestCase(unittest.HomeserverTestCase): - servlets = [ admin.register_servlets_for_client_rest_resource, directory.register_servlets, diff --git a/tests/rest/client/test_ephemeral_message.py b/tests/rest/client/test_ephemeral_message.py index 9fa1f82dfee0..f31ebc8021bb 100644 --- a/tests/rest/client/test_ephemeral_message.py +++ b/tests/rest/client/test_ephemeral_message.py @@ -26,7 +26,6 @@ class EphemeralMessageTestCase(unittest.HomeserverTestCase): - user_id = "@user:test" servlets = [ diff --git a/tests/rest/client/test_events.py b/tests/rest/client/test_events.py index a9b7db9db227..54df2a252c01 100644 --- a/tests/rest/client/test_events.py +++ b/tests/rest/client/test_events.py @@ -38,7 +38,6 @@ class EventStreamPermissionsTestCase(unittest.HomeserverTestCase): ] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - config = self.default_config() config["enable_registration_captcha"] = False config["enable_registration"] = True @@ -51,7 +50,6 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: return hs def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - # register an account self.user_id = self.register_user("sid1", "pass") self.token = self.login(self.user_id, "pass") @@ -142,7 +140,6 @@ class GetEventsTestCase(unittest.HomeserverTestCase): ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - # register an account self.user_id = self.register_user("sid1", "pass") self.token = self.login(self.user_id, "pass") diff --git a/tests/rest/client/test_filter.py b/tests/rest/client/test_filter.py index afc8d641bef2..91678abf1311 100644 --- a/tests/rest/client/test_filter.py +++ b/tests/rest/client/test_filter.py @@ -25,7 +25,6 @@ class FilterTestCase(unittest.HomeserverTestCase): - user_id = "@apple:test" hijack_auth = True EXAMPLE_FILTER = {"room": {"timeline": {"types": ["m.room.message"]}}} @@ -63,14 +62,14 @@ def test_add_filter_for_other_user(self) -> None: def test_add_filter_non_local_user(self) -> None: _is_mine = self.hs.is_mine - self.hs.is_mine = lambda target_user: False + self.hs.is_mine = lambda target_user: False # type: ignore[assignment] channel = self.make_request( "POST", "/_matrix/client/r0/user/%s/filter" % (self.user_id), self.EXAMPLE_FILTER_JSON, ) - self.hs.is_mine = _is_mine + self.hs.is_mine = _is_mine # 
type: ignore[assignment] self.assertEqual(channel.code, 403) self.assertEqual(channel.json_body["errcode"], Codes.FORBIDDEN) diff --git a/tests/rest/client/test_keys.py b/tests/rest/client/test_keys.py index 741fecea7713..8ee548905704 100644 --- a/tests/rest/client/test_keys.py +++ b/tests/rest/client/test_keys.py @@ -14,12 +14,21 @@ from http import HTTPStatus +from signedjson.key import ( + encode_verify_key_base64, + generate_signing_key, + get_verify_key, +) +from signedjson.sign import sign_json + from synapse.api.errors import Codes from synapse.rest import admin from synapse.rest.client import keys, login +from synapse.types import JsonDict from tests import unittest from tests.http.server._base import make_request_with_cancellation_test +from tests.unittest import override_config class KeyQueryTestCase(unittest.HomeserverTestCase): @@ -118,3 +127,135 @@ def test_key_query_cancellation(self) -> None: self.assertEqual(200, channel.code, msg=channel.result["body"]) self.assertIn(bob, channel.json_body["device_keys"]) + + def make_device_keys(self, user_id: str, device_id: str) -> JsonDict: + # We only generate a master key to simplify the test. + master_signing_key = generate_signing_key(device_id) + master_verify_key = encode_verify_key_base64(get_verify_key(master_signing_key)) + + return { + "master_key": sign_json( + { + "user_id": user_id, + "usage": ["master"], + "keys": {"ed25519:" + master_verify_key: master_verify_key}, + }, + user_id, + master_signing_key, + ), + } + + def test_device_signing_with_uia(self) -> None: + """Device signing key upload requires UIA.""" + password = "wonderland" + device_id = "ABCDEFGHI" + alice_id = self.register_user("alice", password) + alice_token = self.login("alice", password, device_id=device_id) + + content = self.make_device_keys(alice_id, device_id) + + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/device_signing/upload", + content, + alice_token, + ) + + self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result) + # Grab the session + session = channel.json_body["session"] + # Ensure that flows are what is expected. 
+ self.assertIn({"stages": ["m.login.password"]}, channel.json_body["flows"]) + + # add UI auth + content["auth"] = { + "type": "m.login.password", + "identifier": {"type": "m.id.user", "user": alice_id}, + "password": password, + "session": session, + } + + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/device_signing/upload", + content, + alice_token, + ) + + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) + + @override_config({"ui_auth": {"session_timeout": "15m"}}) + def test_device_signing_with_uia_session_timeout(self) -> None: + """Device signing key upload requires UIA buy passes with grace period.""" + password = "wonderland" + device_id = "ABCDEFGHI" + alice_id = self.register_user("alice", password) + alice_token = self.login("alice", password, device_id=device_id) + + content = self.make_device_keys(alice_id, device_id) + + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/device_signing/upload", + content, + alice_token, + ) + + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) + + @override_config( + { + "experimental_features": {"msc3967_enabled": True}, + "ui_auth": {"session_timeout": "15s"}, + } + ) + def test_device_signing_with_msc3967(self) -> None: + """Device signing key follows MSC3967 behaviour when enabled.""" + password = "wonderland" + device_id = "ABCDEFGHI" + alice_id = self.register_user("alice", password) + alice_token = self.login("alice", password, device_id=device_id) + + keys1 = self.make_device_keys(alice_id, device_id) + + # Initial request should succeed as no existing keys are present. + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/device_signing/upload", + keys1, + alice_token, + ) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) + + keys2 = self.make_device_keys(alice_id, device_id) + + # Subsequent request should require UIA as keys already exist even though session_timeout is set. + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/device_signing/upload", + keys2, + alice_token, + ) + self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result) + + # Grab the session + session = channel.json_body["session"] + # Ensure that flows are what is expected. 
+ self.assertIn({"stages": ["m.login.password"]}, channel.json_body["flows"]) + + # add UI auth + keys2["auth"] = { + "type": "m.login.password", + "identifier": {"type": "m.id.user", "user": alice_id}, + "password": password, + "session": session, + } + + # Request should complete + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/device_signing/upload", + keys2, + alice_token, + ) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index ff5baa9f0a78..62acf4f44ed0 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -89,7 +89,6 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, login.register_servlets, @@ -737,7 +736,6 @@ def _get_value_from_macaroon(macaroon: pymacaroons.Macaroon, key: str) -> str: class CASTestCase(unittest.HomeserverTestCase): - servlets = [ login.register_servlets, ] diff --git a/tests/rest/client/test_login_token_request.py b/tests/rest/client/test_login_token_request.py index 6aedc1a11c9e..b8187db982bf 100644 --- a/tests/rest/client/test_login_token_request.py +++ b/tests/rest/client/test_login_token_request.py @@ -26,7 +26,6 @@ class LoginTokenRequestServletTestCase(unittest.HomeserverTestCase): - servlets = [ login.register_servlets, admin.register_servlets, diff --git a/tests/rest/client/test_presence.py b/tests/rest/client/test_presence.py index b3738a03046a..dcbb125a3bba 100644 --- a/tests/rest/client/test_presence.py +++ b/tests/rest/client/test_presence.py @@ -35,15 +35,14 @@ class PresenceTestCase(unittest.HomeserverTestCase): servlets = [presence.register_servlets] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - - presence_handler = Mock(spec=PresenceHandler) - presence_handler.set_state.return_value = make_awaitable(None) + self.presence_handler = Mock(spec=PresenceHandler) + self.presence_handler.set_state.return_value = make_awaitable(None) hs = self.setup_test_homeserver( "red", federation_http_client=None, federation_client=Mock(), - presence_handler=presence_handler, + presence_handler=self.presence_handler, ) return hs @@ -61,7 +60,7 @@ def test_put_presence(self) -> None: ) self.assertEqual(channel.code, HTTPStatus.OK) - self.assertEqual(self.hs.get_presence_handler().set_state.call_count, 1) + self.assertEqual(self.presence_handler.set_state.call_count, 1) @unittest.override_config({"use_presence": False}) def test_put_presence_disabled(self) -> None: @@ -76,4 +75,4 @@ def test_put_presence_disabled(self) -> None: ) self.assertEqual(channel.code, HTTPStatus.OK) - self.assertEqual(self.hs.get_presence_handler().set_state.call_count, 0) + self.assertEqual(self.presence_handler.set_state.call_count, 0) diff --git a/tests/rest/client/test_profile.py b/tests/rest/client/test_profile.py index 8de5a342aeef..27c93ad76122 100644 --- a/tests/rest/client/test_profile.py +++ b/tests/rest/client/test_profile.py @@ -30,7 +30,6 @@ class ProfileTestCase(unittest.HomeserverTestCase): - servlets = [ admin.register_servlets_for_client_rest_resource, login.register_servlets, @@ -324,7 +323,6 @@ def _setup_local_files(self, names_and_props: Dict[str, Dict[str, Any]]) -> None class ProfilesRestrictedTestCase(unittest.HomeserverTestCase): - servlets = [ admin.register_servlets_for_client_rest_resource, login.register_servlets, @@ -404,7 +402,6 @@ def ensure_requester_left_room(self) -> None: class 
OwnProfileUnrestrictedTestCase(unittest.HomeserverTestCase): - servlets = [ admin.register_servlets_for_client_rest_resource, login.register_servlets, diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py index 11cf3939d84b..b228dba8613d 100644 --- a/tests/rest/client/test_register.py +++ b/tests/rest/client/test_register.py @@ -40,7 +40,6 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): - servlets = [ login.register_servlets, register.register_servlets, @@ -151,7 +150,7 @@ def test_POST_disabled_registration(self) -> None: self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") def test_POST_guest_registration(self) -> None: - self.hs.config.key.macaroon_secret_key = "test" + self.hs.config.key.macaroon_secret_key = b"test" self.hs.config.registration.allow_guest_access = True channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}") @@ -797,7 +796,6 @@ def test_require_approval(self) -> None: class AccountValidityTestCase(unittest.HomeserverTestCase): - servlets = [ register.register_servlets, synapse.rest.admin.register_servlets_for_client_rest_resource, @@ -913,7 +911,6 @@ def test_logging_out_expired_user(self) -> None: class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): - servlets = [ register.register_servlets, synapse.rest.admin.register_servlets_for_client_rest_resource, @@ -1132,7 +1129,6 @@ def test_manual_email_send_expired_account(self) -> None: class AccountValidityBackgroundJobTestCase(unittest.HomeserverTestCase): - servlets = [synapse.rest.admin.register_servlets_for_client_rest_resource] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: @@ -1166,12 +1162,15 @@ def test_background_job(self) -> None: """ user_id = self.register_user("kermit_delta", "user") - self.hs.config.account_validity.startup_job_max_delta = self.max_delta + self.hs.config.account_validity.account_validity_startup_job_max_delta = ( + self.max_delta + ) now_ms = self.hs.get_clock().time_msec() self.get_success(self.store._set_expiration_date_when_missing()) res = self.get_success(self.store.get_expiration_ts_for_user(user_id)) + assert res is not None self.assertGreaterEqual(res, now_ms + self.validity_period - self.max_delta) self.assertLessEqual(res, now_ms + self.validity_period) diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index c8a6911d5ee5..cc47ac560f28 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -403,7 +403,7 @@ def _assert_edit_bundle( def test_edit(self) -> None: """Test that a simple edit works.""" - + orig_body = {"body": "Hi!", "msgtype": "m.text"} new_body = {"msgtype": "m.text", "body": "I've been edited!"} edit_event_content = { "msgtype": "m.text", @@ -424,9 +424,7 @@ def test_edit(self) -> None: access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) - self.assertEqual( - channel.json_body["content"], {"body": "Hi!", "msgtype": "m.text"} - ) + self.assertEqual(channel.json_body["content"], orig_body) self._assert_edit_bundle(channel.json_body, edit_event_id, edit_event_content) # Request the room messages. @@ -443,7 +441,7 @@ def test_edit(self) -> None: ) # Request the room context. - # /context should return the edited event. + # /context should return the event. 
channel = self.make_request( "GET", f"/rooms/{self.room}/context/{self.parent_id}", access_token=self.user_token, ) @@ -453,7 +451,7 @@ def test_edit(self) -> None: self._assert_edit_bundle( channel.json_body["event"], edit_event_id, edit_event_content ) - self.assertEqual(channel.json_body["event"]["content"], new_body) + self.assertEqual(channel.json_body["event"]["content"], orig_body) # Request sync, but limit the timeline so it becomes limited (and includes # bundled aggregations). @@ -491,45 +489,11 @@ def test_edit(self) -> None: edit_event_content, ) - @override_config({"experimental_features": {"msc3925_inhibit_edit": True}}) - def test_edit_inhibit_replace(self) -> None: - """ - If msc3925_inhibit_edit is enabled, then the original event should not be - replaced. - """ - - new_body = {"msgtype": "m.text", "body": "I've been edited!"} - edit_event_content = { - "msgtype": "m.text", - "body": "foo", - "m.new_content": new_body, - } - channel = self._send_relation( - RelationTypes.REPLACE, - "m.room.message", - content=edit_event_content, - ) - edit_event_id = channel.json_body["event_id"] - - # /context should return the *original* event. - channel = self.make_request( - "GET", - f"/rooms/{self.room}/context/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - self.assertEqual( - channel.json_body["event"]["content"], {"body": "Hi!", "msgtype": "m.text"} - ) - self._assert_edit_bundle( - channel.json_body["event"], edit_event_id, edit_event_content - ) - def test_multi_edit(self) -> None: """Test that multiple edits, including attempts by people who shouldn't be allowed, are correctly handled. """ - + orig_body = {"body": "Hi!", "msgtype": "m.text"} self._send_relation( RelationTypes.REPLACE, "m.room.message", @@ -570,7 +534,7 @@ def test_multi_edit(self) -> None: ) self.assertEqual(200, channel.code, channel.json_body) - self.assertEqual(channel.json_body["event"]["content"], new_body) + self.assertEqual(channel.json_body["event"]["content"], orig_body) self._assert_edit_bundle( channel.json_body["event"], edit_event_id, edit_event_content ) @@ -642,6 +606,7 @@ def test_edit_reply(self) -> None: def test_edit_edit(self) -> None: """Test that an edit cannot be edited.""" + orig_body = {"body": "Hi!", "msgtype": "m.text"} new_body = {"msgtype": "m.text", "body": "Initial edit"} edit_event_content = { "msgtype": "m.text", @@ -675,14 +640,12 @@ def test_edit_edit(self) -> None: access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) - self.assertEqual( - channel.json_body["content"], {"body": "Hi!", "msgtype": "m.text"} - ) + self.assertEqual(channel.json_body["content"], orig_body) # The relations information should not include the edit to the edit. self._assert_edit_bundle(channel.json_body, edit_event_id, edit_event_content) - # /context should return the event updated for the *first* edit + # /context should return the bundled edit for the *first* edit # (The edit to the edit should be ignored.) 
channel = self.make_request( "GET", @@ -690,7 +653,7 @@ def test_edit_edit(self) -> None: access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) - self.assertEqual(channel.json_body["event"]["content"], new_body) + self.assertEqual(channel.json_body["event"]["content"], orig_body) self._assert_edit_bundle( channel.json_body["event"], edit_event_id, edit_event_content ) @@ -1080,48 +1043,6 @@ def assert_bundle(event_json: JsonDict) -> None: ] assert_bundle(self._find_event_in_chunk(chunk)) - def test_annotation(self) -> None: - """ - Test that annotations get correctly bundled. - """ - # Setup by sending a variety of relations. - self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token - ) - self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") - - def assert_annotations(bundled_aggregations: JsonDict) -> None: - self.assertEqual( - { - "chunk": [ - {"type": "m.reaction", "key": "a", "count": 2}, - {"type": "m.reaction", "key": "b", "count": 1}, - ] - }, - bundled_aggregations, - ) - - self._test_bundled_aggregations(RelationTypes.ANNOTATION, assert_annotations, 7) - - def test_annotation_to_annotation(self) -> None: - """Any relation to an annotation should be ignored.""" - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - event_id = channel.json_body["event_id"] - self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", "b", parent_id=event_id - ) - - # Fetch the initial annotation event to see if it has bundled aggregations. - channel = self.make_request( - "GET", - f"/_matrix/client/v3/rooms/{self.room}/event/{event_id}", - access_token=self.user_token, - ) - self.assertEquals(200, channel.code, channel.json_body) - # The first annotationt should not have any bundled aggregations. - self.assertNotIn("m.relations", channel.json_body["unsigned"]) - def test_reference(self) -> None: """ Test that references get correctly bundled. @@ -1138,7 +1059,7 @@ def assert_annotations(bundled_aggregations: JsonDict) -> None: bundled_aggregations, ) - self._test_bundled_aggregations(RelationTypes.REFERENCE, assert_annotations, 7) + self._test_bundled_aggregations(RelationTypes.REFERENCE, assert_annotations, 6) def test_thread(self) -> None: """ @@ -1183,7 +1104,7 @@ def assert_thread(bundled_aggregations: JsonDict) -> None: # The "user" sent the root event and is making queries for the bundled # aggregations: they have participated. - self._test_bundled_aggregations(RelationTypes.THREAD, _gen_assert(True), 7) + self._test_bundled_aggregations(RelationTypes.THREAD, _gen_assert(True), 6) # The "user2" sent replies in the thread and is making queries for the # bundled aggregations: they have participated. 
# @@ -1208,9 +1129,10 @@ def test_thread_with_bundled_aggregations_for_latest(self) -> None: channel = self._send_relation(RelationTypes.THREAD, "m.room.test") thread_2 = channel.json_body["event_id"] - self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=thread_2 + channel = self._send_relation( + RelationTypes.REFERENCE, "org.matrix.test", parent_id=thread_2 ) + reference_event_id = channel.json_body["event_id"] def assert_thread(bundled_aggregations: JsonDict) -> None: self.assertEqual(2, bundled_aggregations.get("count")) @@ -1235,17 +1157,15 @@ def assert_thread(bundled_aggregations: JsonDict) -> None: self.assert_dict( { "m.relations": { - RelationTypes.ANNOTATION: { - "chunk": [ - {"type": "m.reaction", "key": "a", "count": 1}, - ] + RelationTypes.REFERENCE: { + "chunk": [{"event_id": reference_event_id}] }, } }, bundled_aggregations["latest_event"].get("unsigned"), ) - self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 7) + self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 6) def test_nested_thread(self) -> None: """ @@ -1330,7 +1250,6 @@ def test_thread_edit_latest_event(self) -> None: thread_summary = relations_dict[RelationTypes.THREAD] self.assertIn("latest_event", thread_summary) latest_event_in_thread = thread_summary["latest_event"] - self.assertEqual(latest_event_in_thread["content"]["body"], "I've been edited!") # The latest event in the thread should have the edit appear under the # bundled aggregations. self.assertDictContainsSubset( @@ -1363,10 +1282,11 @@ def test_aggregation_get_event_for_thread(self) -> None: channel = self._send_relation(RelationTypes.THREAD, "m.room.test") thread_id = channel.json_body["event_id"] - # Annotate the thread. - self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=thread_id + # Make a reference to the thread. + channel = self._send_relation( + RelationTypes.REFERENCE, "org.matrix.test", parent_id=thread_id ) + reference_event_id = channel.json_body["event_id"] channel = self.make_request( "GET", @@ -1377,9 +1297,7 @@ def test_aggregation_get_event_for_thread(self) -> None: self.assertEqual( channel.json_body["unsigned"].get("m.relations"), { - RelationTypes.ANNOTATION: { - "chunk": [{"count": 1, "key": "a", "type": "m.reaction"}] - }, + RelationTypes.REFERENCE: {"chunk": [{"event_id": reference_event_id}]}, }, ) @@ -1396,9 +1314,7 @@ def test_aggregation_get_event_for_thread(self) -> None: self.assertEqual( thread_message["unsigned"].get("m.relations"), { - RelationTypes.ANNOTATION: { - "chunk": [{"count": 1, "key": "a", "type": "m.reaction"}] - }, + RelationTypes.REFERENCE: {"chunk": [{"event_id": reference_event_id}]}, }, ) @@ -1410,7 +1326,8 @@ def test_bundled_aggregations_with_filter(self) -> None: Note that the spec allows for a server to return additional fields beyond what is specified. """ - self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + channel = self._send_relation(RelationTypes.REFERENCE, "org.matrix.test") + reference_event_id = channel.json_body["event_id"] # Note that the sync filter does not include "unsigned" as a field. filter = urllib.parse.quote_plus( @@ -1428,7 +1345,12 @@ def test_bundled_aggregations_with_filter(self) -> None: # Ensure there's bundled aggregations on it. 
self.assertIn("unsigned", parent_event) - self.assertIn("m.relations", parent_event["unsigned"]) + self.assertEqual( + parent_event["unsigned"].get("m.relations"), + { + RelationTypes.REFERENCE: {"chunk": [{"event_id": reference_event_id}]}, + }, + ) class RelationIgnoredUserTestCase(BaseRelationsTestCase): @@ -1475,53 +1397,8 @@ def _test_ignored_user( return before_aggregations[relation_type], after_aggregations[relation_type] - def test_annotation(self) -> None: - """Annotations should ignore""" - # Send 2 from us, 2 from the to be ignored user. - allowed_event_ids = [] - ignored_event_ids = [] - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a") - allowed_event_ids.append(channel.json_body["event_id"]) - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="b") - allowed_event_ids.append(channel.json_body["event_id"]) - channel = self._send_relation( - RelationTypes.ANNOTATION, - "m.reaction", - key="a", - access_token=self.user2_token, - ) - ignored_event_ids.append(channel.json_body["event_id"]) - channel = self._send_relation( - RelationTypes.ANNOTATION, - "m.reaction", - key="c", - access_token=self.user2_token, - ) - ignored_event_ids.append(channel.json_body["event_id"]) - - before_aggregations, after_aggregations = self._test_ignored_user( - RelationTypes.ANNOTATION, allowed_event_ids, ignored_event_ids - ) - - self.assertCountEqual( - before_aggregations["chunk"], - [ - {"type": "m.reaction", "key": "a", "count": 2}, - {"type": "m.reaction", "key": "b", "count": 1}, - {"type": "m.reaction", "key": "c", "count": 1}, - ], - ) - - self.assertCountEqual( - after_aggregations["chunk"], - [ - {"type": "m.reaction", "key": "a", "count": 1}, - {"type": "m.reaction", "key": "b", "count": 1}, - ], - ) - def test_reference(self) -> None: - """Annotations should ignore""" + """Aggregations should exclude reference relations from ignored users""" channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test") allowed_event_ids = [channel.json_body["event_id"]] @@ -1544,7 +1421,7 @@ def test_reference(self) -> None: ) def test_thread(self) -> None: - """Annotations should ignore""" + """Aggregations should exclude thread releations from ignored users""" channel = self._send_relation(RelationTypes.THREAD, "m.room.test") allowed_event_ids = [channel.json_body["event_id"]] @@ -1618,43 +1495,6 @@ def _get_threads(self) -> List[Tuple[str, str]]: for t in threads ] - def test_redact_relation_annotation(self) -> None: - """ - Test that annotations of an event are properly handled after the - annotation is redacted. - - The redacted relation should not be included in bundled aggregations or - the response to relations. - """ - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - to_redact_event_id = channel.json_body["event_id"] - - channel = self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token - ) - unredacted_event_id = channel.json_body["event_id"] - - # Both relations should exist. - event_ids = self._get_related_events() - relations = self._get_bundled_aggregations() - self.assertCountEqual(event_ids, [to_redact_event_id, unredacted_event_id]) - self.assertEquals( - relations["m.annotation"], - {"chunk": [{"type": "m.reaction", "key": "a", "count": 2}]}, - ) - - # Redact one of the reactions. - self._redact(to_redact_event_id) - - # The unredacted relation should still exist. 
- event_ids = self._get_related_events() - relations = self._get_bundled_aggregations() - self.assertEquals(event_ids, [unredacted_event_id]) - self.assertEquals( - relations["m.annotation"], - {"chunk": [{"type": "m.reaction", "key": "a", "count": 1}]}, - ) - def test_redact_relation_thread(self) -> None: """ Test that thread replies are properly handled after the thread reply redacted. @@ -1775,14 +1615,14 @@ def test_redact_parent_annotation(self) -> None: is redacted. """ # Add a relation - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") + channel = self._send_relation(RelationTypes.REFERENCE, "org.matrix.test") related_event_id = channel.json_body["event_id"] # The relations should exist. event_ids = self._get_related_events() relations = self._get_bundled_aggregations() self.assertEqual(len(event_ids), 1) - self.assertIn(RelationTypes.ANNOTATION, relations) + self.assertIn(RelationTypes.REFERENCE, relations) # Redact the original event. self._redact(self.parent_id) @@ -1792,8 +1632,8 @@ def test_redact_parent_annotation(self) -> None: relations = self._get_bundled_aggregations() self.assertEquals(event_ids, [related_event_id]) self.assertEquals( - relations["m.annotation"], - {"chunk": [{"type": "m.reaction", "key": "👍", "count": 1}]}, + relations[RelationTypes.REFERENCE], + {"chunk": [{"event_id": related_event_id}]}, ) def test_redact_parent_thread(self) -> None: diff --git a/tests/rest/client/test_rendezvous.py b/tests/rest/client/test_rendezvous.py index c0eb5d01a65b..8dbd64be552b 100644 --- a/tests/rest/client/test_rendezvous.py +++ b/tests/rest/client/test_rendezvous.py @@ -25,7 +25,6 @@ class RendezvousServletTestCase(unittest.HomeserverTestCase): - servlets = [ rendezvous.register_servlets, ] diff --git a/tests/rest/client/test_report_event.py b/tests/rest/client/test_report_event.py index 7cb1017a4a44..b88f1d61a081 100644 --- a/tests/rest/client/test_report_event.py +++ b/tests/rest/client/test_report_event.py @@ -73,6 +73,60 @@ def test_reason_and_score_null(self) -> None: data = {"reason": None, "score": None} self._assert_status(400, data) + def test_cannot_report_nonexistent_event(self) -> None: + """ + Tests that we don't accept event reports for events which do not exist. + """ + channel = self.make_request( + "POST", + f"rooms/{self.room_id}/report/$nonsenseeventid:test", + {"reason": "i am very sad"}, + access_token=self.other_user_tok, + ) + self.assertEqual(404, channel.code, msg=channel.result["body"]) + self.assertEqual( + "Unable to report event: it does not exist or you aren't able to see it.", + channel.json_body["error"], + msg=channel.result["body"], + ) + + def test_cannot_report_event_if_not_in_room(self) -> None: + """ + Tests that we don't accept event reports for events that exist, but for which + the reporter should not be able to view (because they are not in the room). + """ + # Have the admin user create a room (the "other" user will not join this room). + new_room_id = self.helper.create_room_as(tok=self.admin_user_tok) + + # Have the admin user send an event in this room. + response = self.helper.send_event( + new_room_id, + "m.room.message", + content={ + "msgtype": "m.text", + "body": "This event has some bad words in it! Flip!", + }, + tok=self.admin_user_tok, + ) + event_id = response["event_id"] + + # Have the "other" user attempt to report it. Perhaps they found the event ID + # in a screenshot or something... 
+ channel = self.make_request( + "POST", + f"rooms/{new_room_id}/report/{event_id}", + {"reason": "I'm not in this room but I have opinions anyways!"}, + access_token=self.other_user_tok, + ) + + # The "other" user is not in the room, so their report should be rejected. + self.assertEqual(404, channel.code, msg=channel.result["body"]) + self.assertEqual( + "Unable to report event: it does not exist or you aren't able to see it.", + channel.json_body["error"], + msg=channel.result["body"], + ) + def _assert_status(self, response_status: int, data: JsonDict) -> None: channel = self.make_request( "POST", self.report_path, data, access_token=self.other_user_tok diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py index 9c8c1889d3ea..d3e06bf6b3d1 100644 --- a/tests/rest/client/test_retention.py +++ b/tests/rest/client/test_retention.py @@ -136,6 +136,7 @@ def test_visibility(self) -> None: # Send a first event, which should be filtered out at the end of the test. resp = self.helper.send(room_id=room_id, body="1", tok=self.token) first_event_id = resp.get("event_id") + assert isinstance(first_event_id, str) # Advance the time by 2 days. We're using the default retention policy, therefore # after this the first event will still be valid. @@ -144,6 +145,7 @@ def test_visibility(self) -> None: # Send another event, which shouldn't get filtered out. resp = self.helper.send(room_id=room_id, body="2", tok=self.token) valid_event_id = resp.get("event_id") + assert isinstance(valid_event_id, str) # Advance the time by another 2 days. After this, the first event should be # outdated but not the second one. @@ -229,7 +231,7 @@ def _test_retention_event_purged(self, room_id: str, increment: float) -> None: # Check that we can still access state events that were sent before the event that # has been purged. 
- self.get_event(room_id, create_event.event_id) + self.get_event(room_id, bool(create_event)) def get_event(self, event_id: str, expect_none: bool = False) -> JsonDict: event = self.get_success(self.store.get_event(event_id, allow_none=True)) @@ -238,7 +240,7 @@ def get_event(self, event_id: str, expect_none: bool = False) -> JsonDict: self.assertIsNone(event) return {} - self.assertIsNotNone(event) + assert event is not None time_now = self.clock.time_msec() serialized = self.serializer.serialize_event(event, time_now) diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 9222cab19801..a4900703c415 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -65,7 +65,6 @@ class RoomBase(unittest.HomeserverTestCase): servlets = [room.register_servlets, room.register_deprecated_servlets] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - self.hs = self.setup_test_homeserver( "red", federation_http_client=None, @@ -92,7 +91,6 @@ class RoomPermissionsTestCase(RoomBase): rmcreator_id = "@notme:red" def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.helper.auth_user_id = self.rmcreator_id # create some rooms under the name rmcreator_id self.uncreated_rmid = "!aa:test" @@ -715,7 +713,7 @@ def test_post_room_no_keys(self) -> None: self.assertEqual(HTTPStatus.OK, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) assert channel.resource_usage is not None - self.assertEqual(33, channel.resource_usage.db_txn_count) + self.assertEqual(30, channel.resource_usage.db_txn_count) def test_post_room_initial_state(self) -> None: # POST with initial_state config key, expect new room id @@ -728,7 +726,7 @@ def test_post_room_initial_state(self) -> None: self.assertEqual(HTTPStatus.OK, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) assert channel.resource_usage is not None - self.assertEqual(36, channel.resource_usage.db_txn_count) + self.assertEqual(32, channel.resource_usage.db_txn_count) def test_post_room_visibility_key(self) -> None: # POST with visibility config key, expect new room id @@ -1127,7 +1125,6 @@ def test_invites_by_users_ratelimit(self) -> None: class RoomJoinTestCase(RoomBase): - servlets = [ admin.register_servlets, login.register_servlets, @@ -2102,7 +2099,6 @@ class RoomSearchTestCase(unittest.HomeserverTestCase): hijack_auth = False def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - # Register the user who does the searching self.user_id2 = self.register_user("user", "pass") self.access_token = self.login("user", "pass") @@ -2195,7 +2191,6 @@ def test_include_context(self) -> None: class PublicRoomsRestrictedTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, @@ -2203,7 +2198,6 @@ class PublicRoomsRestrictedTestCase(unittest.HomeserverTestCase): ] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - self.url = b"/_matrix/client/r0/publicRooms" config = self.default_config() @@ -2225,7 +2219,6 @@ def test_restricted_auth(self) -> None: class PublicRoomsRoomTypeFilterTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, @@ -2233,7 +2226,6 @@ class PublicRoomsRoomTypeFilterTestCase(unittest.HomeserverTestCase): ] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> 
HomeServer: - config = self.default_config() config["allow_public_rooms_without_auth"] = True self.hs = self.setup_test_homeserver(config=config) @@ -2414,7 +2406,6 @@ def test_fallback(self) -> None: class PerRoomProfilesForbiddenTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, @@ -2983,7 +2974,6 @@ def _filter_messages(self, filter: JsonDict) -> List[str]: class ContextTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, @@ -3359,7 +3349,6 @@ def test_bad_alias(self) -> None: class ThreepidInviteTestCase(unittest.HomeserverTestCase): - servlets = [ admin.register_servlets, login.register_servlets, @@ -3382,8 +3371,8 @@ def test_threepid_invite_spamcheck_deprecated(self) -> None: # a remote IS. We keep the mock for make_and_store_3pid_invite around so we # can check its call_count later on during the test. make_invite_mock = Mock(return_value=make_awaitable((Mock(event_id="abc"), 0))) - self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock - self.hs.get_identity_handler().lookup_3pid = Mock( + self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock # type: ignore[assignment] + self.hs.get_identity_handler().lookup_3pid = Mock( # type: ignore[assignment] return_value=make_awaitable(None), ) @@ -3438,13 +3427,14 @@ def test_threepid_invite_spamcheck(self) -> None: """ Test allowing/blocking threepid invites with a spam-check module. - In this test, we use the more recent API in which callbacks return a `Union[Codes, Literal["NOT_SPAM"]]`.""" + In this test, we use the more recent API in which callbacks return a `Union[Codes, Literal["NOT_SPAM"]]`. + """ # Mock a few functions to prevent the test from failing due to failing to talk to # a remote IS. We keep the mock for make_and_store_3pid_invite around so we # can check its call_count later on during the test. 
make_invite_mock = Mock(return_value=make_awaitable((Mock(event_id="abc"), 0))) - self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock - self.hs.get_identity_handler().lookup_3pid = Mock( + self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock # type: ignore[assignment] + self.hs.get_identity_handler().lookup_3pid = Mock( # type: ignore[assignment] return_value=make_awaitable(None), ) @@ -3563,8 +3553,10 @@ def _inject_outlier(self, room_id: str) -> EventBase: ) event.internal_metadata.outlier = True + persistence = self._storage_controllers.persistence + assert persistence is not None self.get_success( - self._storage_controllers.persistence.persist_event( + persistence.persist_event( event, EventContext.for_outlier(self._storage_controllers) ) ) diff --git a/tests/rest/client/test_shadow_banned.py b/tests/rest/client/test_shadow_banned.py index c807a37bc2ea..8d2cdf875150 100644 --- a/tests/rest/client/test_shadow_banned.py +++ b/tests/rest/client/test_shadow_banned.py @@ -84,7 +84,7 @@ def test_invite(self) -> None: def test_invite_3pid(self) -> None: """Ensure that a 3PID invite does not attempt to contact the identity server.""" identity_handler = self.hs.get_identity_handler() - identity_handler.lookup_3pid = Mock( + identity_handler.lookup_3pid = Mock( # type: ignore[assignment] side_effect=AssertionError("This should not get called") ) @@ -222,7 +222,7 @@ def test_typing(self) -> None: event_source.get_new_events( user=UserID.from_string(self.other_user_id), from_key=0, - limit=None, + limit=10, room_ids=[room_id], is_guest=False, ) @@ -286,6 +286,7 @@ def test_displayname(self) -> None: self.banned_user_id, ) ) + assert event is not None self.assertEqual( event.content, {"membership": "join", "displayname": original_display_name} ) @@ -321,6 +322,7 @@ def test_room_displayname(self) -> None: self.banned_user_id, ) ) + assert event is not None self.assertEqual( event.content, {"membership": "join", "displayname": original_display_name} ) diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index 2cb624ce7b57..e6a843088829 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -41,7 +41,6 @@ class FilterTestCase(unittest.HomeserverTestCase): - user_id = "@apple:test" servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, @@ -191,7 +190,6 @@ def _test_sync_filter_labels(self, sync_filter: str) -> List[JsonDict]: class SyncTypingTests(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, @@ -1248,7 +1246,6 @@ def test_user_with_no_rooms_receives_self_device_list_updates(self) -> None: class ExcludeRoomTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index 99b77598b212..197942860386 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -137,6 +137,7 @@ def test_third_party_rules(self) -> None: """Tests that a forbidden event is forbidden from being sent, but an allowed one can be sent. 
""" + # patch the rules module with a Mock which will return False for some event # types async def check( @@ -243,6 +244,7 @@ async def check( def test_modify_event(self) -> None: """The module can return a modified version of the event""" + # first patch the event checker so that it will modify the event async def check( ev: EventBase, state: StateMap[EventBase] @@ -275,6 +277,7 @@ async def check( def test_message_edit(self) -> None: """Ensure that the module doesn't cause issues with edited messages.""" + # first patch the event checker so that it will modify the event async def check( ev: EventBase, state: StateMap[EventBase] @@ -429,7 +432,7 @@ def test_sent_event_end_up_in_room_state(self) -> None: async def test_fn( event: EventBase, state_events: StateMap[EventBase] ) -> Tuple[bool, Optional[JsonDict]]: - if event.is_state and event.type == EventTypes.PowerLevels: + if event.is_state() and event.type == EventTypes.PowerLevels: await api.create_and_send_event_into_room( { "room_id": event.room_id, @@ -935,3 +938,125 @@ def test_on_threepid_bind(self) -> None: # Check that the mock was called with the right parameters self.assertEqual(args, (user_id, "email", "foo@example.com")) + + def test_on_add_and_remove_user_third_party_identifier(self) -> None: + """Tests that the on_add_user_third_party_identifier and + on_remove_user_third_party_identifier module callbacks are called + just before associating and removing a 3PID to/from an account. + """ + # Pretend to be a Synapse module and register both callbacks as mocks. + on_add_user_third_party_identifier_callback_mock = Mock( + return_value=make_awaitable(None) + ) + on_remove_user_third_party_identifier_callback_mock = Mock( + return_value=make_awaitable(None) + ) + third_party_rules = self.hs.get_third_party_event_rules() + third_party_rules.register_third_party_rules_callbacks( + on_add_user_third_party_identifier=on_add_user_third_party_identifier_callback_mock, + on_remove_user_third_party_identifier=on_remove_user_third_party_identifier_callback_mock, + ) + + # Register an admin user. + self.register_user("admin", "password", admin=True) + admin_tok = self.login("admin", "password") + + # Also register a normal user we can modify. + user_id = self.register_user("user", "password") + + # Add a 3PID to the user. + channel = self.make_request( + "PUT", + "/_synapse/admin/v2/users/%s" % user_id, + { + "threepids": [ + { + "medium": "email", + "address": "foo@example.com", + }, + ], + }, + access_token=admin_tok, + ) + + # Check that the mocked add callback was called with the appropriate + # 3PID details. + self.assertEqual(channel.code, 200, channel.json_body) + on_add_user_third_party_identifier_callback_mock.assert_called_once() + args = on_add_user_third_party_identifier_callback_mock.call_args[0] + self.assertEqual(args, (user_id, "email", "foo@example.com")) + + # Now remove the 3PID from the user + channel = self.make_request( + "PUT", + "/_synapse/admin/v2/users/%s" % user_id, + { + "threepids": [], + }, + access_token=admin_tok, + ) + + # Check that the mocked remove callback was called with the appropriate + # 3PID details. 
+ self.assertEqual(channel.code, 200, channel.json_body) + on_remove_user_third_party_identifier_callback_mock.assert_called_once() + args = on_remove_user_third_party_identifier_callback_mock.call_args[0] + self.assertEqual(args, (user_id, "email", "foo@example.com")) + + def test_on_remove_user_third_party_identifier_is_called_on_deactivate( + self, + ) -> None: + """Tests that the on_remove_user_third_party_identifier module callback is called + when a user is deactivated and their third-party ID associations are deleted. + """ + # Pretend to be a Synapse module and register both callbacks as mocks. + on_remove_user_third_party_identifier_callback_mock = Mock( + return_value=make_awaitable(None) + ) + third_party_rules = self.hs.get_third_party_event_rules() + third_party_rules.register_third_party_rules_callbacks( + on_remove_user_third_party_identifier=on_remove_user_third_party_identifier_callback_mock, + ) + + # Register an admin user. + self.register_user("admin", "password", admin=True) + admin_tok = self.login("admin", "password") + + # Also register a normal user we can modify. + user_id = self.register_user("user", "password") + + # Add a 3PID to the user. + channel = self.make_request( + "PUT", + "/_synapse/admin/v2/users/%s" % user_id, + { + "threepids": [ + { + "medium": "email", + "address": "foo@example.com", + }, + ], + }, + access_token=admin_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Check that the mock was not called on the act of adding a third-party ID. + on_remove_user_third_party_identifier_callback_mock.assert_not_called() + + # Now deactivate the user. + channel = self.make_request( + "PUT", + "/_synapse/admin/v2/users/%s" % user_id, + { + "deactivated": True, + }, + access_token=admin_tok, + ) + + # Check that the mocked remove callback was called with the appropriate + # 3PID details. + self.assertEqual(channel.code, 200, channel.json_body) + on_remove_user_third_party_identifier_callback_mock.assert_called_once() + args = on_remove_user_third_party_identifier_callback_mock.call_args[0] + self.assertEqual(args, (user_id, "email", "foo@example.com")) diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py index 3086e1b5650b..d8dc56261ac1 100644 --- a/tests/rest/client/test_transactions.py +++ b/tests/rest/client/test_transactions.py @@ -39,15 +39,23 @@ def setUp(self) -> None: self.cache = HttpTransactionCache(self.hs) self.mock_http_response = (HTTPStatus.OK, {"result": "GOOD JOB!"}) - self.mock_key = "foo" + + # Here we make sure that we're setting all the fields that HttpTransactionCache + # uses to build the transaction key. 
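(Editorial sketch, not part of the patch: the mocked fields set up below — request path, app service, guest flag and access token ID — are the inputs to the transaction key. One plausible, purely illustrative derivation is shown here; the actual key construction inside HttpTransactionCache may differ.)

from synapse.http.site import SynapseRequest
from synapse.types import Requester


def example_transaction_key(request: SynapseRequest, requester: Requester) -> str:
    # Combine the request path with something identifying the requester, so that
    # a retry of the same request by the same client is deduplicated.
    path = request.path.decode("utf8")
    if requester.app_service is not None:
        return f"{path}-{requester.app_service.id}"
    if requester.is_guest:
        return f"{path}-guest-{requester.user.to_string()}"
    return f"{path}-{requester.access_token_id}"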
+ self.mock_request = Mock() + self.mock_request.path = b"/foo/bar" + self.mock_requester = Mock() + self.mock_requester.app_service = None + self.mock_requester.is_guest = False + self.mock_requester.access_token_id = 1234 @defer.inlineCallbacks def test_executes_given_function( self, ) -> Generator["defer.Deferred[Any]", object, None]: cb = Mock(return_value=make_awaitable(self.mock_http_response)) - res = yield self.cache.fetch_or_execute( - self.mock_key, cb, "some_arg", keyword="arg" + res = yield self.cache.fetch_or_execute_request( + self.mock_request, self.mock_requester, cb, "some_arg", keyword="arg" ) cb.assert_called_once_with("some_arg", keyword="arg") self.assertEqual(res, self.mock_http_response) @@ -58,8 +66,13 @@ def test_deduplicates_based_on_key( ) -> Generator["defer.Deferred[Any]", object, None]: cb = Mock(return_value=make_awaitable(self.mock_http_response)) for i in range(3): # invoke multiple times - res = yield self.cache.fetch_or_execute( - self.mock_key, cb, "some_arg", keyword="arg", changing_args=i + res = yield self.cache.fetch_or_execute_request( + self.mock_request, + self.mock_requester, + cb, + "some_arg", + keyword="arg", + changing_args=i, ) self.assertEqual(res, self.mock_http_response) # expect only a single call to do the work @@ -77,7 +90,9 @@ def cb() -> Generator["defer.Deferred[object]", object, Tuple[int, JsonDict]]: @defer.inlineCallbacks def test() -> Generator["defer.Deferred[Any]", object, None]: with LoggingContext("c") as c1: - res = yield self.cache.fetch_or_execute(self.mock_key, cb) + res = yield self.cache.fetch_or_execute_request( + self.mock_request, self.mock_requester, cb + ) self.assertIs(current_context(), c1) self.assertEqual(res, (1, {})) @@ -106,12 +121,16 @@ def cb() -> "defer.Deferred[Tuple[int, JsonDict]]": with LoggingContext("test") as test_context: try: - yield self.cache.fetch_or_execute(self.mock_key, cb) + yield self.cache.fetch_or_execute_request( + self.mock_request, self.mock_requester, cb + ) except Exception as e: self.assertEqual(e.args[0], "boo") self.assertIs(current_context(), test_context) - res = yield self.cache.fetch_or_execute(self.mock_key, cb) + res = yield self.cache.fetch_or_execute_request( + self.mock_request, self.mock_requester, cb + ) self.assertEqual(res, self.mock_http_response) self.assertIs(current_context(), test_context) @@ -134,29 +153,39 @@ def cb() -> "defer.Deferred[Tuple[int, JsonDict]]": with LoggingContext("test") as test_context: try: - yield self.cache.fetch_or_execute(self.mock_key, cb) + yield self.cache.fetch_or_execute_request( + self.mock_request, self.mock_requester, cb + ) except Exception as e: self.assertEqual(e.args[0], "boo") self.assertIs(current_context(), test_context) - res = yield self.cache.fetch_or_execute(self.mock_key, cb) + res = yield self.cache.fetch_or_execute_request( + self.mock_request, self.mock_requester, cb + ) self.assertEqual(res, self.mock_http_response) self.assertIs(current_context(), test_context) @defer.inlineCallbacks def test_cleans_up(self) -> Generator["defer.Deferred[Any]", object, None]: cb = Mock(return_value=make_awaitable(self.mock_http_response)) - yield self.cache.fetch_or_execute(self.mock_key, cb, "an arg") + yield self.cache.fetch_or_execute_request( + self.mock_request, self.mock_requester, cb, "an arg" + ) # should NOT have cleaned up yet self.clock.advance_time_msec(CLEANUP_PERIOD_MS / 2) - yield self.cache.fetch_or_execute(self.mock_key, cb, "an arg") + yield self.cache.fetch_or_execute_request( + self.mock_request, 
self.mock_requester, cb, "an arg" + ) # still using cache cb.assert_called_once_with("an arg") self.clock.advance_time_msec(CLEANUP_PERIOD_MS) - yield self.cache.fetch_or_execute(self.mock_key, cb, "an arg") + yield self.cache.fetch_or_execute_request( + self.mock_request, self.mock_requester, cb, "an arg" + ) # no longer using cache self.assertEqual(cb.call_count, 2) self.assertEqual(cb.call_args_list, [call("an arg"), call("an arg")]) diff --git a/tests/rest/client/test_upgrade_room.py b/tests/rest/client/test_upgrade_room.py index 04c352b66666..352e9c83f63d 100644 --- a/tests/rest/client/test_upgrade_room.py +++ b/tests/rest/client/test_upgrade_room.py @@ -85,7 +85,7 @@ def test_upgrade(self) -> None: self.room_id, EventTypes.Tombstone, "" ) ) - self.assertIsNotNone(tombstone_event) + assert tombstone_event is not None self.assertEqual(new_room_id, tombstone_event.content["replacement_room"]) # Check that the new room exists. diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py index 8d6f2b6ff9cc..9532e5ddc102 100644 --- a/tests/rest/client/utils.py +++ b/tests/rest/client/utils.py @@ -36,6 +36,7 @@ import attr from typing_extensions import Literal +from twisted.test.proto_helpers import MemoryReactorClock from twisted.web.resource import Resource from twisted.web.server import Site @@ -67,6 +68,7 @@ class RestHelper: """ hs: HomeServer + reactor: MemoryReactorClock site: Site auth_user_id: Optional[str] @@ -142,7 +144,7 @@ def create_room_as( path = path + "?access_token=%s" % tok channel = make_request( - self.hs.get_reactor(), + self.reactor, self.site, "POST", path, @@ -216,7 +218,7 @@ def knock( data["reason"] = reason channel = make_request( - self.hs.get_reactor(), + self.reactor, self.site, "POST", path, @@ -313,7 +315,7 @@ def change_membership( data.update(extra_data or {}) channel = make_request( - self.hs.get_reactor(), + self.reactor, self.site, "PUT", path, @@ -394,7 +396,7 @@ def send_event( path = path + "?access_token=%s" % tok channel = make_request( - self.hs.get_reactor(), + self.reactor, self.site, "PUT", path, @@ -433,7 +435,7 @@ def get_event( path = path + f"?access_token={tok}" channel = make_request( - self.hs.get_reactor(), + self.reactor, self.site, "GET", path, @@ -488,7 +490,7 @@ def _read_write_state( if body is not None: content = json.dumps(body).encode("utf8") - channel = make_request(self.hs.get_reactor(), self.site, method, path, content) + channel = make_request(self.reactor, self.site, method, path, content) assert channel.code == expect_code, "Expected: %d, got: %d, resp: %r" % ( expect_code, @@ -573,8 +575,8 @@ def upload_media( image_length = len(image_data) path = "/_matrix/media/r0/upload?filename=%s" % (filename,) channel = make_request( - self.hs.get_reactor(), - FakeSite(resource, self.hs.get_reactor()), + self.reactor, + FakeSite(resource, self.reactor), "POST", path, content=image_data, @@ -603,7 +605,7 @@ def whoami( expect_code: The return code to expect from attempting the whoami request """ channel = make_request( - self.hs.get_reactor(), + self.reactor, self.site, "GET", "account/whoami", @@ -642,7 +644,7 @@ def login_via_oidc( ) -> Tuple[JsonDict, FakeAuthorizationGrant]: """Log in (as a new user) via OIDC - Returns the result of the final token login. + Returns the result of the final token login and the fake authorization grant. 
Requires that "oidc_config" in the homeserver config be set appropriately (TEST_OIDC_CONFIG is a suitable example) - and by implication, needs a @@ -672,10 +674,28 @@ def login_via_oidc( assert m, channel.text_body login_token = m.group(1) - # finally, submit the matrix login token to the login API, which gives us our - # matrix access token and device id. + return self.login_via_token(login_token, expected_status), grant + + def login_via_token( + self, + login_token: str, + expected_status: int = 200, + ) -> JsonDict: + """Submit the matrix login token to the login API, which gives us our + matrix access token and device id.Log in (as a new user) via OIDC + + Returns the result of the token login. + + Requires that "oidc_config" in the homeserver config be set appropriately + (TEST_OIDC_CONFIG is a suitable example) - and by implication, needs a + "public_base_url". + + Also requires the login servlet and the OIDC callback resource to be mounted at + the normal places. + """ + channel = make_request( - self.hs.get_reactor(), + self.reactor, self.site, "POST", "/login", @@ -684,7 +704,7 @@ def login_via_oidc( assert ( channel.code == expected_status ), f"unexpected status in response: {channel.code}" - return channel.json_body, grant + return channel.json_body def auth_via_oidc( self, @@ -805,7 +825,7 @@ def complete_oidc_auth( with fake_serer.patch_homeserver(hs=self.hs): # now hit the callback URI with the right params and a made-up code channel = make_request( - self.hs.get_reactor(), + self.reactor, self.site, "GET", callback_uri, @@ -849,7 +869,7 @@ def initiate_sso_login( # is the easiest way of figuring out what the Host header ought to be set to # to keep Synapse happy. channel = make_request( - self.hs.get_reactor(), + self.reactor, self.site, "GET", uri, @@ -867,7 +887,7 @@ def get_location(channel: FakeChannel) -> str: location = get_location(channel) parts = urllib.parse.urlsplit(location) channel = make_request( - self.hs.get_reactor(), + self.reactor, self.site, "GET", urllib.parse.urlunsplit(("", "") + parts[2:]), @@ -900,9 +920,7 @@ def initiate_sso_ui_auth( + urllib.parse.urlencode({"session": ui_auth_session_id}) ) # hit the redirect url (which will issue a cookie and state) - channel = make_request( - self.hs.get_reactor(), self.site, "GET", sso_redirect_endpoint - ) + channel = make_request(self.reactor, self.site, "GET", sso_redirect_endpoint) # that should serve a confirmation page assert channel.code == HTTPStatus.OK, channel.text_body channel.extract_cookies(cookies) diff --git a/tests/rest/media/test_media_retention.py b/tests/rest/media/test_media_retention.py index 23f227aed69f..b59d9dfd4da0 100644 --- a/tests/rest/media/test_media_retention.py +++ b/tests/rest/media/test_media_retention.py @@ -31,7 +31,6 @@ class MediaRetentionTestCase(unittest.HomeserverTestCase): - ONE_DAY_IN_MS = 24 * 60 * 60 * 1000 THIRTY_DAYS_IN_MS = 30 * ONE_DAY_IN_MS diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/test_url_preview.py similarity index 93% rename from tests/rest/media/v1/test_url_preview.py rename to tests/rest/media/test_url_preview.py index 2c321f8d04fc..e44beae8c13f 100644 --- a/tests/rest/media/v1/test_url_preview.py +++ b/tests/rest/media/test_url_preview.py @@ -26,8 +26,8 @@ from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactor from synapse.config.oembed import OEmbedEndpointConfig -from synapse.rest.media.v1.media_repository import MediaRepositoryResource -from synapse.rest.media.v1.preview_url_resource import 
IMAGE_CACHE_EXPIRY_MS +from synapse.media.url_previewer import IMAGE_CACHE_EXPIRY_MS +from synapse.rest.media.media_repository_resource import MediaRepositoryResource from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util import Clock @@ -36,7 +36,6 @@ from tests import unittest from tests.server import FakeTransport from tests.test_utils import SMALL_PNG -from tests.utils import MockClock try: import lxml @@ -58,7 +57,6 @@ class URLPreviewTests(unittest.HomeserverTestCase): ) def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - config = self.default_config() config["url_preview_enabled"] = True config["max_spider_size"] = 9999999 @@ -83,7 +81,7 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config["media_store_path"] = self.media_store_path provider_config = { - "module": "synapse.rest.media.v1.storage_provider.FileStorageProviderBackend", + "module": "synapse.media.storage_provider.FileStorageProviderBackend", "store_local": True, "store_synchronous": False, "store_remote": True, @@ -118,9 +116,9 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: return hs def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - - self.media_repo = hs.get_media_repository_resource() - self.preview_url = self.media_repo.children[b"preview_url"] + self.media_repo = hs.get_media_repository() + media_repo_resource = hs.get_media_repository_resource() + self.preview_url = media_repo_resource.children[b"preview_url"] self.lookups: Dict[str, Any] = {} @@ -133,7 +131,6 @@ def resolveHostName( addressTypes: Optional[Sequence[Type[IAddress]]] = None, transportSemantics: str = "TCP", ) -> IResolutionReceiver: - resolution = HostResolution(hostName) resolutionReceiver.resolutionBegan(resolution) if hostName not in self.lookups: @@ -196,9 +193,9 @@ def test_cache_returns_correct_type(self) -> None: ) # Clear the in-memory cache - self.assertIn("http://matrix.org", self.preview_url._cache) - self.preview_url._cache.pop("http://matrix.org") - self.assertNotIn("http://matrix.org", self.preview_url._cache) + self.assertIn("http://matrix.org", self.preview_url._url_previewer._cache) + self.preview_url._url_previewer._cache.pop("http://matrix.org") + self.assertNotIn("http://matrix.org", self.preview_url._url_previewer._cache) # Check the database cache returns the correct response channel = self.make_request( @@ -660,7 +657,7 @@ def test_nonexistent_image(self) -> None: """If the preview image doesn't exist, ensure some data is returned.""" self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")] - end_content = ( + result = ( b"""""" ) @@ -681,8 +678,8 @@ def test_nonexistent_image(self) -> None: b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n" b'Content-Type: text/html; charset="utf8"\r\n\r\n' ) - % (len(end_content),) - + end_content + % (len(result),) + + result ) self.pump() @@ -691,6 +688,44 @@ def test_nonexistent_image(self) -> None: # The image should not be in the result. 
self.assertNotIn("og:image", channel.json_body) + def test_oembed_failure(self) -> None: + """If the autodiscovered oEmbed URL fails, ensure some data is returned.""" + self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")] + + result = b""" + oEmbed Autodiscovery Fail + + """ + + channel = self.make_request( + "GET", + "preview_url?url=http://matrix.org", + shorthand=False, + await_result=False, + ) + self.pump() + + client = self.reactor.tcpClients[0][2].buildProtocol(None) + server = AccumulatingProtocol() + server.makeConnection(FakeTransport(client, self.reactor)) + client.makeConnection(FakeTransport(server, self.reactor)) + client.dataReceived( + ( + b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n" + b'Content-Type: text/html; charset="utf8"\r\n\r\n' + ) + % (len(result),) + + result + ) + + self.pump() + self.assertEqual(channel.code, 200) + + # The image should not be in the result. + self.assertEqual(channel.json_body["og:title"], "oEmbed Autodiscovery Fail") + def test_data_url(self) -> None: """ Requesting to preview a data URL is not supported. @@ -1038,7 +1073,7 @@ def test_storage_providers_exclude_files(self) -> None: """Test that files are not stored in or fetched from storage providers.""" host, media_id = self._download_image() - rel_file_path = self.preview_url.filepaths.url_cache_filepath_rel(media_id) + rel_file_path = self.media_repo.filepaths.url_cache_filepath_rel(media_id) media_store_path = os.path.join(self.media_store_path, rel_file_path) storage_provider_path = os.path.join(self.storage_path, rel_file_path) @@ -1081,7 +1116,7 @@ def test_storage_providers_exclude_thumbnails(self) -> None: host, media_id = self._download_image() rel_thumbnail_path = ( - self.preview_url.filepaths.url_cache_thumbnail_directory_rel(media_id) + self.media_repo.filepaths.url_cache_thumbnail_directory_rel(media_id) ) media_store_thumbnail_path = os.path.join( self.media_store_path, rel_thumbnail_path @@ -1108,7 +1143,7 @@ def test_storage_providers_exclude_thumbnails(self) -> None: self.assertEqual(channel.code, 200) # Remove the original, otherwise thumbnails will regenerate - rel_file_path = self.preview_url.filepaths.url_cache_filepath_rel(media_id) + rel_file_path = self.media_repo.filepaths.url_cache_filepath_rel(media_id) media_store_path = os.path.join(self.media_store_path, rel_file_path) os.remove(media_store_path) @@ -1131,26 +1166,24 @@ def test_storage_providers_exclude_thumbnails(self) -> None: def test_cache_expiry(self) -> None: """Test that URL cache files and thumbnails are cleaned up properly on expiry.""" - self.preview_url.clock = MockClock() - _host, media_id = self._download_image() - file_path = self.preview_url.filepaths.url_cache_filepath(media_id) - file_dirs = self.preview_url.filepaths.url_cache_filepath_dirs_to_delete( + file_path = self.media_repo.filepaths.url_cache_filepath(media_id) + file_dirs = self.media_repo.filepaths.url_cache_filepath_dirs_to_delete( media_id ) - thumbnail_dir = self.preview_url.filepaths.url_cache_thumbnail_directory( + thumbnail_dir = self.media_repo.filepaths.url_cache_thumbnail_directory( media_id ) - thumbnail_dirs = self.preview_url.filepaths.url_cache_thumbnail_dirs_to_delete( + thumbnail_dirs = self.media_repo.filepaths.url_cache_thumbnail_dirs_to_delete( media_id ) self.assertTrue(os.path.isfile(file_path)) self.assertTrue(os.path.isdir(thumbnail_dir)) - self.preview_url.clock.advance_time_msec(IMAGE_CACHE_EXPIRY_MS + 1) - self.get_success(self.preview_url._expire_url_cache_data()) + 
self.reactor.advance(IMAGE_CACHE_EXPIRY_MS * 1000 + 1) + self.get_success(self.preview_url._url_previewer._expire_url_cache_data()) for path in [file_path] + file_dirs + [thumbnail_dir] + thumbnail_dirs: self.assertFalse( diff --git a/tests/scripts/test_new_matrix_user.py b/tests/scripts/test_new_matrix_user.py index 22f99c6ab1ce..3285f2433ccf 100644 --- a/tests/scripts/test_new_matrix_user.py +++ b/tests/scripts/test_new_matrix_user.py @@ -12,29 +12,33 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import List +from typing import List, Optional from unittest.mock import Mock, patch from synapse._scripts.register_new_matrix_user import request_registration +from synapse.types import JsonDict from tests.unittest import TestCase class RegisterTestCase(TestCase): - def test_success(self): + def test_success(self) -> None: """ The script will fetch a nonce, and then generate a MAC with it, and then post that MAC. """ - def get(url, verify=None): + def get(url: str, verify: Optional[bool] = None) -> Mock: r = Mock() r.status_code = 200 r.json = lambda: {"nonce": "a"} return r - def post(url, json=None, verify=None): + def post( + url: str, json: Optional[JsonDict] = None, verify: Optional[bool] = None + ) -> Mock: # Make sure we are sent the correct info + assert json is not None self.assertEqual(json["username"], "user") self.assertEqual(json["password"], "pass") self.assertEqual(json["nonce"], "a") @@ -70,12 +74,12 @@ def post(url, json=None, verify=None): # sys.exit shouldn't have been called. self.assertEqual(err_code, []) - def test_failure_nonce(self): + def test_failure_nonce(self) -> None: """ If the script fails to fetch a nonce, it throws an error and quits. """ - def get(url, verify=None): + def get(url: str, verify: Optional[bool] = None) -> Mock: r = Mock() r.status_code = 404 r.reason = "Not Found" @@ -107,20 +111,23 @@ def get(url, verify=None): self.assertIn("ERROR! Received 404 Not Found", out) self.assertNotIn("Success!", out) - def test_failure_post(self): + def test_failure_post(self) -> None: """ The script will fetch a nonce, and then if the final POST fails, will report an error and quit. 
""" - def get(url, verify=None): + def get(url: str, verify: Optional[bool] = None) -> Mock: r = Mock() r.status_code = 200 r.json = lambda: {"nonce": "a"} return r - def post(url, json=None, verify=None): + def post( + url: str, json: Optional[JsonDict] = None, verify: Optional[bool] = None + ) -> Mock: # Make sure we are sent the correct info + assert json is not None self.assertEqual(json["username"], "user") self.assertEqual(json["password"], "pass") self.assertEqual(json["nonce"], "a") diff --git a/tests/server.py b/tests/server.py index 237bcad8ba6f..5de97227669c 100644 --- a/tests/server.py +++ b/tests/server.py @@ -22,20 +22,25 @@ from collections import deque from io import SEEK_END, BytesIO from typing import ( + Any, + Awaitable, Callable, Dict, Iterable, List, MutableMapping, Optional, + Sequence, Tuple, Type, + TypeVar, Union, + cast, ) from unittest.mock import Mock import attr -from typing_extensions import Deque +from typing_extensions import Deque, ParamSpec from zope.interface import implementer from twisted.internet import address, threads, udp @@ -44,8 +49,10 @@ from twisted.internet.error import DNSLookupError from twisted.internet.interfaces import ( IAddress, + IConnector, IConsumer, IHostnameResolver, + IProducer, IProtocol, IPullProducer, IPushProducer, @@ -54,6 +61,8 @@ IResolverSimple, ITransport, ) +from twisted.internet.protocol import ClientFactory, DatagramProtocol +from twisted.python import threadpool from twisted.python.failure import Failure from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock from twisted.web.http_headers import Headers @@ -61,6 +70,7 @@ from twisted.web.server import Request, Site from synapse.config.database import DatabaseConnectionConfig +from synapse.config.homeserver import HomeServerConfig from synapse.events.presence_router import load_legacy_presence_router from synapse.events.spamcheck import load_legacy_spam_checkers from synapse.events.third_party_rules import load_legacy_third_party_event_rules @@ -88,6 +98,9 @@ logger = logging.getLogger(__name__) +R = TypeVar("R") +P = ParamSpec("P") + # the type of thing that can be passed into `make_request` in the headers list CustomHeaderType = Tuple[Union[str, bytes], Union[str, bytes]] @@ -98,12 +111,14 @@ class TimedOutException(Exception): """ -@implementer(IConsumer) +@implementer(ITransport, IPushProducer, IConsumer) @attr.s(auto_attribs=True) class FakeChannel: """ A fake Twisted Web Channel (the part that interfaces with the wire). + + See twisted.web.http.HTTPChannel. """ site: Union[Site, "FakeSite"] @@ -142,7 +157,7 @@ def text_body(self) -> str: Raises an exception if the request has not yet completed. """ - if not self.is_finished: + if not self.is_finished(): raise Exception("Request not yet completed") return self.result["body"].decode("utf8") @@ -165,27 +180,36 @@ def headers(self) -> Headers: h.addRawHeader(*i) return h - def writeHeaders(self, version, code, reason, headers): + def writeHeaders( + self, version: bytes, code: bytes, reason: bytes, headers: Headers + ) -> None: self.result["version"] = version self.result["code"] = code self.result["reason"] = reason self.result["headers"] = headers - def write(self, content: bytes) -> None: - assert isinstance(content, bytes), "Should be bytes! " + repr(content) + def write(self, data: bytes) -> None: + assert isinstance(data, bytes), "Should be bytes! 
" + repr(data) if "body" not in self.result: self.result["body"] = b"" - self.result["body"] += content + self.result["body"] += data + + def writeSequence(self, data: Iterable[bytes]) -> None: + for x in data: + self.write(x) + + def loseConnection(self) -> None: + self.unregisterProducer() + self.transport.loseConnection() # Type ignore: mypy doesn't like the fact that producer isn't an IProducer. - def registerProducer( # type: ignore[override] - self, - producer: Union[IPullProducer, IPushProducer], - streaming: bool, - ) -> None: - self._producer = producer + def registerProducer(self, producer: IProducer, streaming: bool) -> None: + # TODO This should ensure that the IProducer is an IPushProducer or + # IPullProducer, unfortunately twisted.protocols.basic.FileSender does + # implement those, but doesn't declare it. + self._producer = cast(Union[IPushProducer, IPullProducer], producer) self.producerStreaming = streaming def _produce() -> None: @@ -202,6 +226,16 @@ def unregisterProducer(self) -> None: self._producer = None + def stopProducing(self) -> None: + if self._producer is not None: + self._producer.stopProducing() + + def pauseProducing(self) -> None: + raise NotImplementedError() + + def resumeProducing(self) -> None: + raise NotImplementedError() + def requestDone(self, _self: Request) -> None: self.result["done"] = True if isinstance(_self, SynapseRequest): @@ -281,12 +315,12 @@ def __init__( self.reactor = reactor self.experimental_cors_msc3886 = experimental_cors_msc3886 - def getResourceFor(self, request): + def getResourceFor(self, request: Request) -> IResource: return self._resource def make_request( - reactor, + reactor: MemoryReactorClock, site: Union[Site, FakeSite], method: Union[bytes, str], path: Union[bytes, str], @@ -409,19 +443,21 @@ class ThreadedMemoryReactorClock(MemoryReactorClock): A MemoryReactorClock that supports callFromThread. """ - def __init__(self): + def __init__(self) -> None: self.threadpool = ThreadPool(self) self._tcp_callbacks: Dict[Tuple[str, int], Callable] = {} - self._udp = [] + self._udp: List[udp.Port] = [] self.lookups: Dict[str, str] = {} - self._thread_callbacks: Deque[Callable[[], None]] = deque() + self._thread_callbacks: Deque[Callable[..., R]] = deque() lookups = self.lookups @implementer(IResolverSimple) class FakeResolver: - def getHostByName(self, name, timeout=None): + def getHostByName( + self, name: str, timeout: Optional[Sequence[int]] = None + ) -> "Deferred[str]": if name not in lookups: return fail(DNSLookupError("OH NO: unknown %s" % (name,))) return succeed(lookups[name]) @@ -432,25 +468,44 @@ def getHostByName(self, name, timeout=None): def installNameResolver(self, resolver: IHostnameResolver) -> IHostnameResolver: raise NotImplementedError() - def listenUDP(self, port, protocol, interface="", maxPacketSize=8196): + def listenUDP( + self, + port: int, + protocol: DatagramProtocol, + interface: str = "", + maxPacketSize: int = 8196, + ) -> udp.Port: p = udp.Port(port, protocol, interface, maxPacketSize, self) p.startListening() self._udp.append(p) return p - def callFromThread(self, callback, *args, **kwargs): + def callFromThread( + self, callable: Callable[..., Any], *args: object, **kwargs: object + ) -> None: """ Make the callback fire in the next reactor iteration. """ - cb = lambda: callback(*args, **kwargs) + cb = lambda: callable(*args, **kwargs) # it's not safe to call callLater() here, so we append the callback to a # separate queue. 
self._thread_callbacks.append(cb) - def getThreadPool(self): - return self.threadpool + def callInThread( + self, callable: Callable[..., Any], *args: object, **kwargs: object + ) -> None: + raise NotImplementedError() + + def suggestThreadPoolSize(self, size: int) -> None: + raise NotImplementedError() + + def getThreadPool(self) -> "threadpool.ThreadPool": + # Cast to match super-class. + return cast(threadpool.ThreadPool, self.threadpool) - def add_tcp_client_callback(self, host: str, port: int, callback: Callable): + def add_tcp_client_callback( + self, host: str, port: int, callback: Callable[[], None] + ) -> None: """Add a callback that will be invoked when we receive a connection attempt to the given IP/port using `connectTCP`. @@ -459,7 +514,14 @@ def add_tcp_client_callback(self, host: str, port: int, callback: Callable): """ self._tcp_callbacks[(host, port)] = callback - def connectTCP(self, host: str, port: int, factory, timeout=30, bindAddress=None): + def connectTCP( + self, + host: str, + port: int, + factory: ClientFactory, + timeout: float = 30, + bindAddress: Optional[Tuple[str, int]] = None, + ) -> IConnector: """Fake L{IReactorTCP.connectTCP}.""" conn = super().connectTCP( @@ -472,7 +534,7 @@ def connectTCP(self, host: str, port: int, factory, timeout=30, bindAddress=None return conn - def advance(self, amount): + def advance(self, amount: float) -> None: # first advance our reactor's time, and run any "callLater" callbacks that # makes ready super().advance(amount) @@ -500,25 +562,33 @@ def advance(self, amount): class ThreadPool: """ Threadless thread pool. + + See twisted.python.threadpool.ThreadPool """ - def __init__(self, reactor): + def __init__(self, reactor: IReactorTime): self._reactor = reactor - def start(self): + def start(self) -> None: pass - def stop(self): + def stop(self) -> None: pass - def callInThreadWithCallback(self, onResult, function, *args, **kwargs): - def _(res): + def callInThreadWithCallback( + self, + onResult: Callable[[bool, Union[Failure, R]], None], + function: Callable[P, R], + *args: P.args, + **kwargs: P.kwargs, + ) -> "Deferred[None]": + def _(res: Any) -> None: if isinstance(res, Failure): onResult(False, res) else: onResult(True, res) - d = Deferred() + d: "Deferred[None]" = Deferred() d.addCallback(lambda x: function(*args, **kwargs)) d.addBoth(_) self._reactor.callLater(0, d.callback, True) @@ -535,7 +605,9 @@ def _make_test_homeserver_synchronous(server: HomeServer) -> None: for database in server.get_datastores().databases: pool = database._db_pool - def runWithConnection(func, *args, **kwargs): + def runWithConnection( + func: Callable[..., R], *args: Any, **kwargs: Any + ) -> Awaitable[R]: return threads.deferToThreadPool( pool._reactor, pool.threadpool, @@ -545,20 +617,23 @@ def runWithConnection(func, *args, **kwargs): **kwargs, ) - def runInteraction(interaction, *args, **kwargs): + def runInteraction( + desc: str, func: Callable[..., R], *args: Any, **kwargs: Any + ) -> Awaitable[R]: return threads.deferToThreadPool( pool._reactor, pool.threadpool, pool._runInteraction, - interaction, + desc, + func, *args, **kwargs, ) - pool.runWithConnection = runWithConnection - pool.runInteraction = runInteraction + pool.runWithConnection = runWithConnection # type: ignore[assignment] + pool.runInteraction = runInteraction # type: ignore[assignment] # Replace the thread pool with a threadless 'thread' pool - pool.threadpool = ThreadPool(clock._reactor) + pool.threadpool = ThreadPool(clock._reactor) # type: ignore[assignment] 
pool.running = True # We've just changed the Databases to run DB transactions on the same @@ -573,7 +648,7 @@ def get_clock() -> Tuple[ThreadedMemoryReactorClock, Clock]: @implementer(ITransport) -@attr.s(cmp=False) +@attr.s(cmp=False, auto_attribs=True) class FakeTransport: """ A twisted.internet.interfaces.ITransport implementation which sends all its data @@ -588,48 +663,50 @@ class FakeTransport: If you want bidirectional communication, you'll need two instances. """ - other = attr.ib() + other: IProtocol """The Protocol object which will receive any data written to this transport. - - :type: twisted.internet.interfaces.IProtocol """ - _reactor = attr.ib() + _reactor: IReactorTime """Test reactor - - :type: twisted.internet.interfaces.IReactorTime """ - _protocol = attr.ib(default=None) + _protocol: Optional[IProtocol] = None """The Protocol which is producing data for this transport. Optional, but if set will get called back for connectionLost() notifications etc. """ - _peer_address: Optional[IAddress] = attr.ib(default=None) + _peer_address: IAddress = attr.Factory( + lambda: address.IPv4Address("TCP", "127.0.0.1", 5678) + ) """The value to be returned by getPeer""" - _host_address: Optional[IAddress] = attr.ib(default=None) + _host_address: IAddress = attr.Factory( + lambda: address.IPv4Address("TCP", "127.0.0.1", 1234) + ) """The value to be returned by getHost""" disconnecting = False disconnected = False connected = True - buffer = attr.ib(default=b"") - producer = attr.ib(default=None) - autoflush = attr.ib(default=True) + buffer: bytes = b"" + producer: Optional[IPushProducer] = None + autoflush: bool = True - def getPeer(self) -> Optional[IAddress]: + def getPeer(self) -> IAddress: return self._peer_address - def getHost(self) -> Optional[IAddress]: + def getHost(self) -> IAddress: return self._host_address - def loseConnection(self, reason=None): + def loseConnection(self) -> None: if not self.disconnecting: - logger.info("FakeTransport: loseConnection(%s)", reason) + logger.info("FakeTransport: loseConnection()") self.disconnecting = True if self._protocol: - self._protocol.connectionLost(reason) + self._protocol.connectionLost( + Failure(RuntimeError("FakeTransport.loseConnection()")) + ) # if we still have data to write, delay until that is done if self.buffer: @@ -640,38 +717,38 @@ def loseConnection(self, reason=None): self.connected = False self.disconnected = True - def abortConnection(self): + def abortConnection(self) -> None: logger.info("FakeTransport: abortConnection()") if not self.disconnecting: self.disconnecting = True if self._protocol: - self._protocol.connectionLost(None) + self._protocol.connectionLost(None) # type: ignore[arg-type] self.disconnected = True - def pauseProducing(self): + def pauseProducing(self) -> None: if not self.producer: return self.producer.pauseProducing() - def resumeProducing(self): + def resumeProducing(self) -> None: if not self.producer: return self.producer.resumeProducing() - def unregisterProducer(self): + def unregisterProducer(self) -> None: if not self.producer: return self.producer = None - def registerProducer(self, producer, streaming): + def registerProducer(self, producer: IPushProducer, streaming: bool) -> None: self.producer = producer self.producerStreaming = streaming - def _produce(): + def _produce() -> None: if not self.producer: # we've been unregistered return @@ -683,7 +760,7 @@ def _produce(): if not streaming: self._reactor.callLater(0.0, _produce) - def write(self, byt): + def write(self, byt: bytes) -> 
None: if self.disconnecting: raise Exception("Writing to disconnecting FakeTransport") @@ -695,11 +772,11 @@ def write(self, byt): if self.autoflush: self._reactor.callLater(0.0, self.flush) - def writeSequence(self, seq): + def writeSequence(self, seq: Iterable[bytes]) -> None: for x in seq: self.write(x) - def flush(self, maxbytes=None): + def flush(self, maxbytes: Optional[int] = None) -> None: if not self.buffer: # nothing to do. Don't write empty buffers: it upsets the # TLSMemoryBIOProtocol @@ -750,17 +827,17 @@ def connect_client( class TestHomeServer(HomeServer): - DATASTORE_CLASS = DataStore + DATASTORE_CLASS = DataStore # type: ignore[assignment] def setup_test_homeserver( - cleanup_func, - name="test", - config=None, - reactor=None, + cleanup_func: Callable[[Callable[[], None]], None], + name: str = "test", + config: Optional[HomeServerConfig] = None, + reactor: Optional[ISynapseReactor] = None, homeserver_to_use: Type[HomeServer] = TestHomeServer, - **kwargs, -): + **kwargs: Any, +) -> HomeServer: """ Setup a homeserver suitable for running tests against. Keyword arguments are passed to the Homeserver constructor. @@ -775,13 +852,14 @@ def setup_test_homeserver( HomeserverTestCase. """ if reactor is None: - from twisted.internet import reactor + from twisted.internet import reactor as _reactor + + reactor = cast(ISynapseReactor, _reactor) if config is None: config = default_config(name, parse=True) config.caches.resize_all_caches() - config.ldap_enabled = False if "clock" not in kwargs: kwargs["clock"] = MockClock() @@ -832,6 +910,8 @@ def setup_test_homeserver( # Create the database before we actually try and connect to it, based off # the template database we generate in setupdb() if isinstance(db_engine, PostgresEngine): + import psycopg2.extensions + db_conn = db_engine.module.connect( database=POSTGRES_BASE_DB, user=POSTGRES_USER, @@ -839,6 +919,7 @@ def setup_test_homeserver( port=POSTGRES_PORT, password=POSTGRES_PASSWORD, ) + assert isinstance(db_conn, psycopg2.extensions.connection) db_conn.autocommit = True cur = db_conn.cursor() cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,)) @@ -867,14 +948,15 @@ def setup_test_homeserver( hs.setup_background_tasks() if isinstance(db_engine, PostgresEngine): - database = hs.get_datastores().databases[0] + database_pool = hs.get_datastores().databases[0] # We need to do cleanup on PostgreSQL - def cleanup(): + def cleanup() -> None: import psycopg2 + import psycopg2.extensions # Close all the db pools - database._db_pool.close() + database_pool._db_pool.close() dropped = False @@ -886,6 +968,7 @@ def cleanup(): port=POSTGRES_PORT, password=POSTGRES_PASSWORD, ) + assert isinstance(db_conn, psycopg2.extensions.connection) db_conn.autocommit = True cur = db_conn.cursor() @@ -918,23 +1001,23 @@ def cleanup(): # Need to let the HS build an auth handler and then mess with it # because AuthHandler's constructor requires the HS, so we can't make one # beforehand and pass it in to the HS's constructor (chicken / egg) - async def hash(p): + async def hash(p: str) -> str: return hashlib.md5(p.encode("utf8")).hexdigest() - hs.get_auth_handler().hash = hash + hs.get_auth_handler().hash = hash # type: ignore[assignment] - async def validate_hash(p, h): + async def validate_hash(p: str, h: str) -> bool: return hashlib.md5(p.encode("utf8")).hexdigest() == h - hs.get_auth_handler().validate_hash = validate_hash + hs.get_auth_handler().validate_hash = validate_hash # type: ignore[assignment] # Make the threadpool and database transactions 
synchronous for testing. _make_test_homeserver_synchronous(hs) # Load any configured modules into the homeserver module_api = hs.get_module_api() - for module, config in hs.config.modules.loaded_modules: - module(config=config, api=module_api) + for module, module_config in hs.config.modules.loaded_modules: + module(config=module_config, api=module_api) load_legacy_spam_checkers(hs) load_legacy_third_party_event_rules(hs) diff --git a/tests/server_notices/test_consent.py b/tests/server_notices/test_consent.py index 58b399a04377..3fdf5a6d5207 100644 --- a/tests/server_notices/test_consent.py +++ b/tests/server_notices/test_consent.py @@ -14,14 +14,17 @@ import os +from twisted.test.proto_helpers import MemoryReactor + import synapse.rest.admin from synapse.rest.client import login, room, sync +from synapse.server import HomeServer +from synapse.util import Clock from tests import unittest class ConsentNoticesTests(unittest.HomeserverTestCase): - servlets = [ sync.register_servlets, synapse.rest.admin.register_servlets_for_client_rest_resource, @@ -29,8 +32,7 @@ class ConsentNoticesTests(unittest.HomeserverTestCase): room.register_servlets, ] - def make_homeserver(self, reactor, clock): - + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: tmpdir = self.mktemp() os.mkdir(tmpdir) self.consent_notice_message = "consent %(consent_uri)s" @@ -53,15 +55,13 @@ def make_homeserver(self, reactor, clock): "room_name": "Server Notices", } - hs = self.setup_test_homeserver(config=config) - - return hs + return self.setup_test_homeserver(config=config) - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.user_id = self.register_user("bob", "abc123") self.access_token = self.login("bob", "abc123") - def test_get_sync_message(self): + def test_get_sync_message(self) -> None: """ When user consent server notices are enabled, a sync will cause a notice to fire (in a room which the user is invited to). The notice contains diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index be1be8d85939..0fbe57919a58 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -24,6 +24,8 @@ from synapse.server_notices.resource_limits_server_notices import ( ResourceLimitsServerNotices, ) +from synapse.server_notices.server_notices_sender import ServerNoticesSender +from synapse.types import JsonDict from synapse.util import Clock from tests import unittest @@ -33,7 +35,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): - def default_config(self): + def default_config(self) -> JsonDict: config = default_config("test") config.update( @@ -58,14 +60,15 @@ def default_config(self): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.clock._reactor.advance(1) # type: ignore - self.server_notices_sender = self.hs.get_server_notices_sender() + server_notices_sender = self.hs.get_server_notices_sender() + assert isinstance(server_notices_sender, ServerNoticesSender) # relying on [1] is far from ideal, but the only case where # ResourceLimitsServerNotices class needs to be isolated is this test, # general code should never have a reason to do so ... 
- self._rlsn = self.server_notices_sender._server_notices[1] - if not isinstance(self._rlsn, ResourceLimitsServerNotices): - raise Exception("Failed to find reference to ResourceLimitsServerNotices") + rlsn = list(server_notices_sender._server_notices)[1] + assert isinstance(rlsn, ResourceLimitsServerNotices) + self._rlsn = rlsn self._rlsn._store.user_last_seen_monthly_active = Mock( return_value=make_awaitable(1000) @@ -87,39 +90,43 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self._rlsn._store.get_tags_for_room = Mock(return_value=make_awaitable({})) # type: ignore[assignment] @override_config({"hs_disabled": True}) - def test_maybe_send_server_notice_disabled_hs(self): + def test_maybe_send_server_notice_disabled_hs(self) -> None: """If the HS is disabled, we should not send notices""" self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id)) self._send_notice.assert_not_called() @override_config({"limit_usage_by_mau": False}) - def test_maybe_send_server_notice_to_user_flag_off(self): + def test_maybe_send_server_notice_to_user_flag_off(self) -> None: """If mau limiting is disabled, we should not send notices""" self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id)) self._send_notice.assert_not_called() - def test_maybe_send_server_notice_to_user_remove_blocked_notice(self): + def test_maybe_send_server_notice_to_user_remove_blocked_notice(self) -> None: """Test when user has blocked notice, but should have it removed""" - self._rlsn._auth_blocking.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( # type: ignore[assignment] return_value=make_awaitable(None) ) mock_event = Mock( type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType} ) - self._rlsn._store.get_events = Mock( + self._rlsn._store.get_events = Mock( # type: ignore[assignment] return_value=make_awaitable({"123": mock_event}) ) self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id)) # Would be better to check the content, but once == remove blocking event - self._rlsn._server_notices_manager.maybe_get_notice_room_for_user.assert_called_once() + maybe_get_notice_room_for_user = ( + self._rlsn._server_notices_manager.maybe_get_notice_room_for_user + ) + assert isinstance(maybe_get_notice_room_for_user, Mock) + maybe_get_notice_room_for_user.assert_called_once() self._send_notice.assert_called_once() - def test_maybe_send_server_notice_to_user_remove_blocked_notice_noop(self): + def test_maybe_send_server_notice_to_user_remove_blocked_notice_noop(self) -> None: """ Test when user has blocked notice, but notice ought to be there (NOOP) """ - self._rlsn._auth_blocking.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( # type: ignore[assignment] return_value=make_awaitable(None), side_effect=ResourceLimitError(403, "foo"), ) @@ -127,7 +134,7 @@ def test_maybe_send_server_notice_to_user_remove_blocked_notice_noop(self): mock_event = Mock( type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType} ) - self._rlsn._store.get_events = Mock( + self._rlsn._store.get_events = Mock( # type: ignore[assignment] return_value=make_awaitable({"123": mock_event}) ) @@ -135,11 +142,11 @@ def test_maybe_send_server_notice_to_user_remove_blocked_notice_noop(self): self._send_notice.assert_not_called() - def test_maybe_send_server_notice_to_user_add_blocked_notice(self): + def test_maybe_send_server_notice_to_user_add_blocked_notice(self) -> None: """ Test when user does 
not have blocked notice, but should have one """ - self._rlsn._auth_blocking.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( # type: ignore[assignment] return_value=make_awaitable(None), side_effect=ResourceLimitError(403, "foo"), ) @@ -148,11 +155,11 @@ def test_maybe_send_server_notice_to_user_add_blocked_notice(self): # Would be better to check contents, but 2 calls == set blocking event self.assertEqual(self._send_notice.call_count, 2) - def test_maybe_send_server_notice_to_user_add_blocked_notice_noop(self): + def test_maybe_send_server_notice_to_user_add_blocked_notice_noop(self) -> None: """ Test when user does not have blocked notice, nor should they (NOOP) """ - self._rlsn._auth_blocking.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( # type: ignore[assignment] return_value=make_awaitable(None) ) @@ -160,12 +167,12 @@ def test_maybe_send_server_notice_to_user_add_blocked_notice_noop(self): self._send_notice.assert_not_called() - def test_maybe_send_server_notice_to_user_not_in_mau_cohort(self): + def test_maybe_send_server_notice_to_user_not_in_mau_cohort(self) -> None: """ Test when user is not part of the MAU cohort - this should not ever happen - but ... """ - self._rlsn._auth_blocking.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( # type: ignore[assignment] return_value=make_awaitable(None) ) self._rlsn._store.user_last_seen_monthly_active = Mock( @@ -176,12 +183,14 @@ def test_maybe_send_server_notice_to_user_not_in_mau_cohort(self): self._send_notice.assert_not_called() @override_config({"mau_limit_alerting": False}) - def test_maybe_send_server_notice_when_alerting_suppressed_room_unblocked(self): + def test_maybe_send_server_notice_when_alerting_suppressed_room_unblocked( + self, + ) -> None: """ Test that when server is over MAU limit and alerting is suppressed, then an alert message is not sent into the room """ - self._rlsn._auth_blocking.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( # type: ignore[assignment] return_value=make_awaitable(None), side_effect=ResourceLimitError( 403, "foo", limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER @@ -192,11 +201,11 @@ def test_maybe_send_server_notice_when_alerting_suppressed_room_unblocked(self): self.assertEqual(self._send_notice.call_count, 0) @override_config({"mau_limit_alerting": False}) - def test_check_hs_disabled_unaffected_by_mau_alert_suppression(self): + def test_check_hs_disabled_unaffected_by_mau_alert_suppression(self) -> None: """ Test that when a server is disabled, that MAU limit alerting is ignored. """ - self._rlsn._auth_blocking.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( # type: ignore[assignment] return_value=make_awaitable(None), side_effect=ResourceLimitError( 403, "foo", limit_type=LimitBlockingTypes.HS_DISABLED @@ -208,26 +217,28 @@ def test_check_hs_disabled_unaffected_by_mau_alert_suppression(self): self.assertEqual(self._send_notice.call_count, 2) @override_config({"mau_limit_alerting": False}) - def test_maybe_send_server_notice_when_alerting_suppressed_room_blocked(self): + def test_maybe_send_server_notice_when_alerting_suppressed_room_blocked( + self, + ) -> None: """ When the room is already in a blocked state, test that when alerting is suppressed that the room is returned to an unblocked state. 
""" - self._rlsn._auth_blocking.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( # type: ignore[assignment] return_value=make_awaitable(None), side_effect=ResourceLimitError( 403, "foo", limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER ), ) - self._rlsn._server_notices_manager.__is_room_currently_blocked = Mock( + self._rlsn._is_room_currently_blocked = Mock( # type: ignore[assignment] return_value=make_awaitable((True, [])) ) mock_event = Mock( type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType} ) - self._rlsn._store.get_events = Mock( + self._rlsn._store.get_events = Mock( # type: ignore[assignment] return_value=make_awaitable({"123": mock_event}) ) self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id)) @@ -243,7 +254,7 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.HomeserverTestCase): sync.register_servlets, ] - def default_config(self): + def default_config(self) -> JsonDict: c = super().default_config() c["server_notices"] = { "system_mxid_localpart": "server", @@ -259,20 +270,22 @@ def default_config(self): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.clock._reactor.advance(1) # type: ignore self.store = self.hs.get_datastores().main - self.server_notices_sender = self.hs.get_server_notices_sender() self.server_notices_manager = self.hs.get_server_notices_manager() self.event_source = self.hs.get_event_sources() + server_notices_sender = self.hs.get_server_notices_sender() + assert isinstance(server_notices_sender, ServerNoticesSender) + # relying on [1] is far from ideal, but the only case where # ResourceLimitsServerNotices class needs to be isolated is this test, # general code should never have a reason to do so ... - self._rlsn = self.server_notices_sender._server_notices[1] - if not isinstance(self._rlsn, ResourceLimitsServerNotices): - raise Exception("Failed to find reference to ResourceLimitsServerNotices") + rlsn = list(server_notices_sender._server_notices)[1] + assert isinstance(rlsn, ResourceLimitsServerNotices) + self._rlsn = rlsn self.user_id = "@user_id:test" - def test_server_notice_only_sent_once(self): + def test_server_notice_only_sent_once(self) -> None: self.store.get_monthly_active_count = Mock(return_value=make_awaitable(1000)) self.store.user_last_seen_monthly_active = Mock( @@ -308,7 +321,7 @@ def test_server_notice_only_sent_once(self): self.assertEqual(count, 1) - def test_no_invite_without_notice(self): + def test_no_invite_without_notice(self) -> None: """Tests that a user doesn't get invited to a server notices room without a server notice being sent. @@ -330,7 +343,7 @@ def test_no_invite_without_notice(self): m.assert_called_once_with(user_id) - def test_invite_with_notice(self): + def test_invite_with_notice(self) -> None: """Tests that, if the MAU limit is hit, the server notices user invites each user to a room in which it has sent a notice. 
""" diff --git a/tests/storage/databases/main/test_deviceinbox.py b/tests/storage/databases/main/test_deviceinbox.py index 373707b275c7..b6d5c474b0bd 100644 --- a/tests/storage/databases/main/test_deviceinbox.py +++ b/tests/storage/databases/main/test_deviceinbox.py @@ -23,7 +23,6 @@ class DeviceInboxBackgroundUpdateStoreTestCase(HomeserverTestCase): - servlets = [ admin.register_servlets, devices.register_servlets, diff --git a/tests/storage/databases/main/test_events_worker.py b/tests/storage/databases/main/test_events_worker.py index 9f33afcca04d..9606ecc43b6b 100644 --- a/tests/storage/databases/main/test_events_worker.py +++ b/tests/storage/databases/main/test_events_worker.py @@ -120,6 +120,7 @@ def test_persisting_event_invalidates_cache(self) -> None: # Persist the event which should invalidate or prefill the # `have_seen_event` cache so we don't return stale values. persistence = self.hs.get_storage_controllers().persistence + assert persistence is not None self.get_success( persistence.persist_event( event, diff --git a/tests/storage/databases/main/test_receipts.py b/tests/storage/databases/main/test_receipts.py index ac77aec003b1..71db47405ef5 100644 --- a/tests/storage/databases/main/test_receipts.py +++ b/tests/storage/databases/main/test_receipts.py @@ -26,7 +26,6 @@ class ReceiptsBackgroundUpdateStoreTestCase(HomeserverTestCase): - servlets = [ admin.register_servlets, room.register_servlets, @@ -62,6 +61,7 @@ def _test_background_receipts_unique_index( keys and expected receipt key-values after duplicate receipts have been removed. """ + # First, undo the background update. def drop_receipts_unique_index(txn: LoggingTransaction) -> None: txn.execute(f"DROP INDEX IF EXISTS {index_name}") diff --git a/tests/storage/databases/main/test_room.py b/tests/storage/databases/main/test_room.py index 3108ca344423..dbd8f3a85eea 100644 --- a/tests/storage/databases/main/test_room.py +++ b/tests/storage/databases/main/test_room.py @@ -27,7 +27,6 @@ class RoomBackgroundUpdateStoreTestCase(HomeserverTestCase): - servlets = [ admin.register_servlets, room.register_servlets, diff --git a/tests/storage/test_account_data.py b/tests/storage/test_account_data.py index 79c71b29cfd2..8f2bd3c278de 100644 --- a/tests/storage/test_account_data.py +++ b/tests/storage/test_account_data.py @@ -156,3 +156,25 @@ def test_invalid_data(self) -> None: # No one ignores the user now. self.assert_ignored(self.user, set()) self.assert_ignorers("@other:test", set()) + + def test_ignoring_users_with_latest_stream_ids(self) -> None: + """Test that ignoring users updates the latest stream ID for the ignored + user list account data.""" + + def get_latest_ignore_streampos(user_id: str) -> Optional[int]: + return self.get_success( + self.store.get_latest_stream_id_for_global_account_data_by_type_for_user( + user_id, AccountDataTypes.IGNORED_USER_LIST + ) + ) + + self.assertIsNone(get_latest_ignore_streampos("@user:test")) + + self._update_ignore_list("@other:test", "@another:remote") + + self.assertEqual(get_latest_ignore_streampos("@user:test"), 2) + + # Add one user, remove one user, and leave one user. 
+ self._update_ignore_list("@foo:test", "@another:remote") + + self.assertEqual(get_latest_ignore_streampos("@user:test"), 3) diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py index f8b010b03c3a..9275abae0a79 100644 --- a/tests/storage/test_cleanup_extrems.py +++ b/tests/storage/test_cleanup_extrems.py @@ -44,8 +44,9 @@ def prepare( # Create a test user and room self.user = UserID("alice", "test") self.requester = create_requester(self.user) - info, _ = self.get_success(self.room_creator.create_room(self.requester, {})) - self.room_id = info["room_id"] + self.room_id, _, _ = self.get_success( + self.room_creator.create_room(self.requester, {}) + ) def run_background_update(self) -> None: """Re run the background update to clean up the extremities.""" @@ -276,10 +277,9 @@ def prepare( self.user = UserID.from_string(self.register_user("user1", "password")) self.token1 = self.login("user1", "password") self.requester = create_requester(self.user) - info, _ = self.get_success( + self.room_id, _, _ = self.get_success( self.room_creator.create_room(self.requester, {"visibility": "public"}) ) - self.room_id = info["room_id"] self.event_creator = homeserver.get_event_creation_handler() homeserver.config.consent.user_consent_version = self.CONSENT_VERSION diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index 7f7f4ef892bd..cd0079871ca2 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -656,7 +656,6 @@ def test_old_user_ips_pruned(self) -> None: class ClientIpAuthTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py index c070278db80f..e39b63edac42 100644 --- a/tests/storage/test_event_chain.py +++ b/tests/storage/test_event_chain.py @@ -389,6 +389,7 @@ def persist( """ persist_events_store = self.hs.get_datastores().persist_events + assert persist_events_store is not None for e in events: e.internal_metadata.stream_ordering = self._next_stream_ordering @@ -397,6 +398,7 @@ def persist( def _persist(txn: LoggingTransaction) -> None: # We need to persist the events to the events and state_events # tables. 
+ assert persist_events_store is not None persist_events_store._store_event_txn( txn, [(e, EventContext(self.hs.get_storage_controllers())) for e in events], @@ -415,7 +417,6 @@ def _persist(txn: LoggingTransaction) -> None: def fetch_chains( self, events: List[EventBase] ) -> Tuple[Dict[str, Tuple[int, int]], _LinkMap]: - # Fetch the map from event ID -> (chain ID, sequence number) rows = self.get_success( self.store.db_pool.simple_select_many_batch( @@ -490,7 +491,6 @@ def test_simple(self) -> None: class EventChainBackgroundUpdateTestCase(HomeserverTestCase): - servlets = [ admin.register_servlets, room.register_servlets, @@ -522,7 +522,7 @@ def _generate_room(self) -> Tuple[str, List[Set[str]]]: latest_event_ids = self.get_success( self.store.get_prev_events_for_room(room_id) ) - event, context = self.get_success( + event, unpersisted_context = self.get_success( event_handler.create_event( self.requester, { @@ -535,14 +535,17 @@ def _generate_room(self) -> Tuple[str, List[Set[str]]]: prev_event_ids=latest_event_ids, ) ) + context = self.get_success(unpersisted_context.persist(event)) self.get_success( event_handler.handle_new_client_event( self.requester, events_and_context=[(event, context)] ) ) - state1 = set(self.get_success(context.get_current_state_ids()).values()) + state_ids1 = self.get_success(context.get_current_state_ids()) + assert state_ids1 is not None + state1 = set(state_ids1.values()) - event, context = self.get_success( + event, unpersisted_context = self.get_success( event_handler.create_event( self.requester, { @@ -555,12 +558,15 @@ def _generate_room(self) -> Tuple[str, List[Set[str]]]: prev_event_ids=latest_event_ids, ) ) + context = self.get_success(unpersisted_context.persist(event)) self.get_success( event_handler.handle_new_client_event( self.requester, events_and_context=[(event, context)] ) ) - state2 = set(self.get_success(context.get_current_state_ids()).values()) + state_ids2 = self.get_success(context.get_current_state_ids()) + assert state_ids2 is not None + state2 = set(state_ids2.values()) # Delete the chain cover info. 
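# A sketch, not part of the patch: the event-chain hunks above and the
# redaction/state hunks further down all follow the same new recipe --
# create_new_client_event hands back an *unpersisted* context, and callers must
# persist it against the event before handing both onwards. The classes below
# only mimic that calling shape; they are hypothetical, not Synapse's real
# implementation.
import asyncio
from dataclasses import dataclass
from typing import Tuple


@dataclass
class FakeEvent:
    event_id: str


@dataclass
class EventContext:
    state_group: int


class UnpersistedEventContext:
    async def persist(self, event: FakeEvent) -> EventContext:
        # The real method writes state groups to the database; fake it here.
        return EventContext(state_group=1)


async def create_new_client_event() -> Tuple[FakeEvent, UnpersistedEventContext]:
    return FakeEvent("$example:test"), UnpersistedEventContext()


async def main() -> None:
    event, unpersisted_context = await create_new_client_event()
    # The step this patch adds everywhere: persist the context first.
    context = await unpersisted_context.persist(event)
    assert context.state_group == 1


asyncio.run(main())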
diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py index 7fd3e01364e9..3e1984c15cf0 100644 --- a/tests/storage/test_event_federation.py +++ b/tests/storage/test_event_federation.py @@ -54,6 +54,9 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main + persist_events = hs.get_datastores().persist_events + assert persist_events is not None + self.persist_events = persist_events def test_get_prev_events_for_room(self) -> None: room_id = "@ROOM:local" @@ -226,7 +229,7 @@ def insert_event(txn: LoggingTransaction) -> None: }, ) - self.hs.datastores.persist_events._persist_event_auth_chain_txn( + self.persist_events._persist_event_auth_chain_txn( txn, [ cast(EventBase, FakeEvent(event_id, room_id, auth_graph[event_id])) @@ -445,7 +448,7 @@ def insert_event(txn: LoggingTransaction) -> None: ) # Insert all events apart from 'B' - self.hs.datastores.persist_events._persist_event_auth_chain_txn( + self.persist_events._persist_event_auth_chain_txn( txn, [ cast(EventBase, FakeEvent(event_id, room_id, auth_graph[event_id])) @@ -464,7 +467,7 @@ def insert_event(txn: LoggingTransaction) -> None: updatevalues={"has_auth_chain_index": False}, ) - self.hs.datastores.persist_events._persist_event_auth_chain_txn( + self.persist_events._persist_event_auth_chain_txn( txn, [cast(EventBase, FakeEvent("b", room_id, auth_graph["b"]))], ) @@ -669,7 +672,7 @@ def _setup_room_for_backfill_tests(self) -> _BackfillSetupInfo: complete_event_dict_map: Dict[str, JsonDict] = {} stream_ordering = 0 - for (event_id, prev_event_ids) in event_graph.items(): + for event_id, prev_event_ids in event_graph.items(): depth = depth_map[event_id] complete_event_dict_map[event_id] = { diff --git a/tests/storage/test_event_metrics.py b/tests/storage/test_event_metrics.py index 36de6f7862dd..eb963fdb5258 100644 --- a/tests/storage/test_event_metrics.py +++ b/tests/storage/test_event_metrics.py @@ -40,8 +40,7 @@ def test_exposed_to_prometheus(self) -> None: events = [(3, 2), (6, 2), (4, 6)] for event_count, extrems in events: - info, _ = self.get_success(room_creator.create_room(requester, {})) - room_id = info["room_id"] + room_id, _, _ = self.get_success(room_creator.create_room(requester, {})) last_event = None diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py index 76c06a9d1e72..aa19c3bd30d5 100644 --- a/tests/storage/test_event_push_actions.py +++ b/tests/storage/test_event_push_actions.py @@ -774,7 +774,7 @@ def add_event(so: int, ts: int) -> None: self.assertEqual(r, 3) # add a bunch of dummy events to the events table - for (stream_ordering, ts) in ( + for stream_ordering, ts in ( (3, 110), (4, 120), (5, 120), diff --git a/tests/storage/test_events.py b/tests/storage/test_events.py index 05661a537dbd..e67dd0589db9 100644 --- a/tests/storage/test_events.py +++ b/tests/storage/test_events.py @@ -40,7 +40,9 @@ def prepare( self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer ) -> None: self.state = self.hs.get_state_handler() - self._persistence = self.hs.get_storage_controllers().persistence + persistence = self.hs.get_storage_controllers().persistence + assert persistence is not None + self._persistence = persistence self._state_storage_controller = self.hs.get_storage_controllers().state self.store = self.hs.get_datastores().main @@ -374,7 +376,9 @@ def prepare( self, reactor: 
MemoryReactor, clock: Clock, homeserver: HomeServer ) -> None: self.state = self.hs.get_state_handler() - self._persistence = self.hs.get_storage_controllers().persistence + persistence = self.hs.get_storage_controllers().persistence + assert persistence is not None + self._persistence = persistence self.store = self.hs.get_datastores().main def test_remote_user_rooms_cache_invalidated(self) -> None: diff --git a/tests/storage/test_keys.py b/tests/storage/test_keys.py index aa4b5bd3b18d..ba68171ad7fb 100644 --- a/tests/storage/test_keys.py +++ b/tests/storage/test_keys.py @@ -16,8 +16,6 @@ import signedjson.types import unpaddedbase64 -from twisted.internet.defer import Deferred - from synapse.storage.keys import FetchKeyResult import tests.unittest @@ -44,20 +42,26 @@ def test_get_server_verify_keys(self) -> None: key_id_1 = "ed25519:key1" key_id_2 = "ed25519:KEY_ID_2" - d = store.store_server_verify_keys( - "from_server", - 10, - [ - ("server1", key_id_1, FetchKeyResult(KEY_1, 100)), - ("server1", key_id_2, FetchKeyResult(KEY_2, 200)), - ], + self.get_success( + store.store_server_verify_keys( + "from_server", + 10, + [ + ("server1", key_id_1, FetchKeyResult(KEY_1, 100)), + ("server1", key_id_2, FetchKeyResult(KEY_2, 200)), + ], + ) ) - self.get_success(d) - d = store.get_server_verify_keys( - [("server1", key_id_1), ("server1", key_id_2), ("server1", "ed25519:key3")] + res = self.get_success( + store.get_server_verify_keys( + [ + ("server1", key_id_1), + ("server1", key_id_2), + ("server1", "ed25519:key3"), + ] + ) ) - res = self.get_success(d) self.assertEqual(len(res.keys()), 3) res1 = res[("server1", key_id_1)] @@ -82,18 +86,20 @@ def test_cache(self) -> None: key_id_1 = "ed25519:key1" key_id_2 = "ed25519:key2" - d = store.store_server_verify_keys( - "from_server", - 0, - [ - ("srv1", key_id_1, FetchKeyResult(KEY_1, 100)), - ("srv1", key_id_2, FetchKeyResult(KEY_2, 200)), - ], + self.get_success( + store.store_server_verify_keys( + "from_server", + 0, + [ + ("srv1", key_id_1, FetchKeyResult(KEY_1, 100)), + ("srv1", key_id_2, FetchKeyResult(KEY_2, 200)), + ], + ) ) - self.get_success(d) - d = store.get_server_verify_keys([("srv1", key_id_1), ("srv1", key_id_2)]) - res = self.get_success(d) + res = self.get_success( + store.get_server_verify_keys([("srv1", key_id_1), ("srv1", key_id_2)]) + ) self.assertEqual(len(res.keys()), 2) res1 = res[("srv1", key_id_1)] @@ -105,9 +111,7 @@ def test_cache(self) -> None: self.assertEqual(res2.valid_until_ts, 200) # we should be able to look up the same thing again without a db hit - res = store.get_server_verify_keys([("srv1", key_id_1)]) - if isinstance(res, Deferred): - res = self.successResultOf(res) + res = self.get_success(store.get_server_verify_keys([("srv1", key_id_1)])) self.assertEqual(len(res.keys()), 1) self.assertEqual(res[("srv1", key_id_1)].verify_key, KEY_1) @@ -119,8 +123,9 @@ def test_cache(self) -> None: ) self.get_success(d) - d = store.get_server_verify_keys([("srv1", key_id_1), ("srv1", key_id_2)]) - res = self.get_success(d) + res = self.get_success( + store.get_server_verify_keys([("srv1", key_id_1), ("srv1", key_id_2)]) + ) self.assertEqual(len(res.keys()), 2) res1 = res[("srv1", key_id_1)] diff --git a/tests/storage/test_purge.py b/tests/storage/test_purge.py index 010cc74c3164..857e2caf2e24 100644 --- a/tests/storage/test_purge.py +++ b/tests/storage/test_purge.py @@ -23,7 +23,6 @@ class PurgeTests(HomeserverTestCase): - user_id = "@red:server" servlets = [room.register_servlets] @@ -112,7 +111,7 @@ def 
test_purge_room(self) -> None: self.room_id, "m.room.create", "" ) ) - self.assertIsNotNone(create_event) + assert create_event is not None # Purge everything before this topological token self.get_success( diff --git a/tests/storage/test_receipts.py b/tests/storage/test_receipts.py index e9bbecef2849..30226064eb14 100644 --- a/tests/storage/test_receipts.py +++ b/tests/storage/test_receipts.py @@ -38,9 +38,9 @@ def prepare( self.store = homeserver.get_datastores().main self.room_creator = homeserver.get_room_creation_handler() - self.persist_event_storage_controller = ( - self.hs.get_storage_controllers().persistence - ) + persist_event_storage_controller = self.hs.get_storage_controllers().persistence + assert persist_event_storage_controller is not None + self.persist_event_storage_controller = persist_event_storage_controller # Create a test user self.ourUser = UserID.from_string(OUR_USER_ID) @@ -51,12 +51,14 @@ def prepare( self.otherRequester = create_requester(self.otherUser) # Create a test room - info, _ = self.get_success(self.room_creator.create_room(self.ourRequester, {})) - self.room_id1 = info["room_id"] + self.room_id1, _, _ = self.get_success( + self.room_creator.create_room(self.ourRequester, {}) + ) # Create a second test room - info, _ = self.get_success(self.room_creator.create_room(self.ourRequester, {})) - self.room_id2 = info["room_id"] + self.room_id2, _, _ = self.get_success( + self.room_creator.create_room(self.ourRequester, {}) + ) # Join the second user to the first room memberEvent, memberEventContext = self.get_success( diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py index 8119c5047d4f..59f0c87c56c1 100644 --- a/tests/storage/test_redaction.py +++ b/tests/storage/test_redaction.py @@ -75,10 +75,12 @@ def inject_room_member( # type: ignore[override] }, ) - event, context = self.get_success( + event, unpersisted_context = self.get_success( self.event_creation_handler.create_new_client_event(builder) ) + context = self.get_success(unpersisted_context.persist(event)) + self.get_success(self._persistence.persist_event(event, context)) return event @@ -97,10 +99,12 @@ def inject_message(self, room: RoomID, user: UserID, body: str) -> EventBase: }, ) - event, context = self.get_success( + event, unpersisted_context = self.get_success( self.event_creation_handler.create_new_client_event(builder) ) + context = self.get_success(unpersisted_context.persist(event)) + self.get_success(self._persistence.persist_event(event, context)) return event @@ -120,10 +124,12 @@ def inject_redaction( }, ) - event, context = self.get_success( + event, unpersisted_context = self.get_success( self.event_creation_handler.create_new_client_event(builder) ) + context = self.get_success(unpersisted_context.persist(event)) + self.get_success(self._persistence.persist_event(event, context)) return event @@ -260,7 +266,7 @@ def type(self) -> str: def internal_metadata(self) -> _EventInternalMetadata: return self._base_builder.internal_metadata - event_1, context_1 = self.get_success( + event_1, unpersisted_context_1 = self.get_success( self.event_creation_handler.create_new_client_event( cast( EventBuilder, @@ -281,9 +287,11 @@ def internal_metadata(self) -> _EventInternalMetadata: ) ) + context_1 = self.get_success(unpersisted_context_1.persist(event_1)) + self.get_success(self._persistence.persist_event(event_1, context_1)) - event_2, context_2 = self.get_success( + event_2, unpersisted_context_2 = self.get_success( 
self.event_creation_handler.create_new_client_event( cast( EventBuilder, @@ -303,6 +311,8 @@ def internal_metadata(self) -> _EventInternalMetadata: ) ) ) + + context_2 = self.get_success(unpersisted_context_2.persist(event_2)) self.get_success(self._persistence.persist_event(event_2, context_2)) # fetch one of the redactions @@ -422,10 +432,12 @@ def test_store_redacted_redaction(self) -> None: }, ) - redaction_event, context = self.get_success( + redaction_event, unpersisted_context = self.get_success( self.event_creation_handler.create_new_client_event(builder) ) + context = self.get_success(unpersisted_context.persist(redaction_event)) + self.get_success(self._persistence.persist_event(redaction_event, context)) # Now lets jump to the future where we have censored the redaction event diff --git a/tests/storage/test_room_search.py b/tests/storage/test_room_search.py index 14d872514d21..f183c38477b0 100644 --- a/tests/storage/test_room_search.py +++ b/tests/storage/test_room_search.py @@ -119,7 +119,6 @@ def test_non_string(self) -> None: "content": {"msgtype": "m.text", "body": 2}, "room_id": room_id, "sender": user_id, - "depth": prev_event.depth + 1, "prev_events": prev_event_ids, "origin_server_ts": self.clock.time_msec(), } @@ -134,7 +133,7 @@ def test_non_string(self) -> None: prev_state_map, for_verification=False, ), - depth=event_dict["depth"], + depth=prev_event.depth + 1, ) ) diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index 7dd08c02503e..af976970c1f9 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -27,7 +27,6 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase): - servlets = [ login.register_servlets, register_servlets_for_client_rest_resource, @@ -35,7 +34,6 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase): ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: TestHomeServer) -> None: # type: ignore[override] - # We can't test the RoomMemberStore on its own without the other event # storage logic self.store = hs.get_datastores().main @@ -50,7 +48,6 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: TestHomeServer) -> N self.clock._reactor.advance(1) # type: ignore def test_one_member(self) -> None: - # Alice creates the room, and is automatically joined self.room = self.helper.create_room_as(self.u_alice, tok=self.t_alice) diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index 938a4c7d1a6a..041a9787e0dd 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -69,10 +69,12 @@ def inject_state_event( }, ) - event, context = self.get_success( + event, unpersisted_context = self.get_success( self.event_creation_handler.create_new_client_event(builder) ) + context = self.get_success(unpersisted_context.persist(event)) + assert self.storage.persistence is not None self.get_success(self.storage.persistence.persist_event(event, context)) @@ -242,7 +244,7 @@ def test_get_state_for_event(self) -> None: # test _get_state_for_group_using_cache correctly filters out members # with types=[] - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_cache, group, state_filter=StateFilter( @@ -259,7 +261,7 @@ def test_get_state_for_event(self) -> None: state_dict, ) - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = 
self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( @@ -272,7 +274,7 @@ def test_get_state_for_event(self) -> None: # test _get_state_for_group_using_cache correctly filters in members # with wildcard types - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_cache, group, state_filter=StateFilter( @@ -289,7 +291,7 @@ def test_get_state_for_event(self) -> None: state_dict, ) - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( @@ -309,7 +311,7 @@ def test_get_state_for_event(self) -> None: # test _get_state_for_group_using_cache correctly filters in members # with specific types - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_cache, group, state_filter=StateFilter( @@ -327,7 +329,7 @@ def test_get_state_for_event(self) -> None: state_dict, ) - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( @@ -341,7 +343,7 @@ def test_get_state_for_event(self) -> None: # test _get_state_for_group_using_cache correctly filters in members # with specific types - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( @@ -392,7 +394,7 @@ def test_get_state_for_event(self) -> None: # test _get_state_for_group_using_cache correctly filters out members # with types=[] room_id = self.room.to_string() - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_cache, group, state_filter=StateFilter( @@ -404,7 +406,7 @@ def test_get_state_for_event(self) -> None: self.assertDictEqual({}, state_dict) room_id = self.room.to_string() - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( @@ -417,7 +419,7 @@ def test_get_state_for_event(self) -> None: # test _get_state_for_group_using_cache correctly filters in members # wildcard types - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_cache, group, state_filter=StateFilter( @@ -428,7 +430,7 @@ def test_get_state_for_event(self) -> None: self.assertEqual(is_all, False) self.assertDictEqual({}, state_dict) - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( @@ -447,7 +449,7 @@ def test_get_state_for_event(self) -> 
None: # test _get_state_for_group_using_cache correctly filters in members # with specific types - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_cache, group, state_filter=StateFilter( @@ -459,7 +461,7 @@ def test_get_state_for_event(self) -> None: self.assertEqual(is_all, False) self.assertDictEqual({}, state_dict) - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( @@ -473,7 +475,7 @@ def test_get_state_for_event(self) -> None: # test _get_state_for_group_using_cache correctly filters in members # with specific types - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_cache, group, state_filter=StateFilter( @@ -485,7 +487,7 @@ def test_get_state_for_event(self) -> None: self.assertEqual(is_all, False) self.assertDictEqual({}, state_dict) - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( @@ -496,3 +498,129 @@ def test_get_state_for_event(self) -> None: self.assertEqual(is_all, True) self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict) + + def test_batched_state_group_storing(self) -> None: + creation_event = self.inject_state_event( + self.room, self.u_alice, EventTypes.Create, "", {} + ) + state_to_event = self.get_success( + self.storage.state.get_state_groups( + self.room.to_string(), [creation_event.event_id] + ) + ) + current_state_group = list(state_to_event.keys())[0] + + # create some unpersisted events and event contexts to store against room + events_and_context = [] + builder = self.event_builder_factory.for_room_version( + RoomVersions.V1, + { + "type": EventTypes.Name, + "sender": self.u_alice.to_string(), + "state_key": "", + "room_id": self.room.to_string(), + "content": {"name": "first rename of room"}, + }, + ) + + event1, unpersisted_context1 = self.get_success( + self.event_creation_handler.create_new_client_event(builder) + ) + events_and_context.append((event1, unpersisted_context1)) + + builder2 = self.event_builder_factory.for_room_version( + RoomVersions.V1, + { + "type": EventTypes.JoinRules, + "sender": self.u_alice.to_string(), + "state_key": "", + "room_id": self.room.to_string(), + "content": {"join_rule": "private"}, + }, + ) + + event2, unpersisted_context2 = self.get_success( + self.event_creation_handler.create_new_client_event(builder2) + ) + events_and_context.append((event2, unpersisted_context2)) + + builder3 = self.event_builder_factory.for_room_version( + RoomVersions.V1, + { + "type": EventTypes.Message, + "sender": self.u_alice.to_string(), + "room_id": self.room.to_string(), + "content": {"body": "hello from event 3", "msgtype": "m.text"}, + }, + ) + + event3, unpersisted_context3 = self.get_success( + self.event_creation_handler.create_new_client_event(builder3) + ) + events_and_context.append((event3, unpersisted_context3)) + + builder4 = self.event_builder_factory.for_room_version( + RoomVersions.V1, + { + "type": EventTypes.JoinRules, + "sender": 
self.u_alice.to_string(), + "state_key": "", + "room_id": self.room.to_string(), + "content": {"join_rule": "public"}, + }, + ) + + event4, unpersisted_context4 = self.get_success( + self.event_creation_handler.create_new_client_event(builder4) + ) + events_and_context.append((event4, unpersisted_context4)) + + processed_events_and_context = self.get_success( + self.hs.get_datastores().state.store_state_deltas_for_batched( + events_and_context, self.room.to_string(), current_state_group + ) + ) + + # check that only state events are in state_groups, and all state events are in state_groups + res = self.get_success( + self.store.db_pool.simple_select_list( + table="state_groups", + keyvalues=None, + retcols=("event_id",), + ) + ) + + events = [] + for result in res: + self.assertNotIn(event3.event_id, result) + events.append(result.get("event_id")) + + for event, _ in processed_events_and_context: + if event.is_state(): + self.assertIn(event.event_id, events) + + # check that each unique state has state group in state_groups_state and that the + # type/state key is correct, and check that each state event's state group + # has an entry and prev event in state_group_edges + for event, context in processed_events_and_context: + if event.is_state(): + state = self.get_success( + self.store.db_pool.simple_select_list( + table="state_groups_state", + keyvalues={"state_group": context.state_group_after_event}, + retcols=("type", "state_key"), + ) + ) + self.assertEqual(event.type, state[0].get("type")) + self.assertEqual(event.state_key, state[0].get("state_key")) + + groups = self.get_success( + self.store.db_pool.simple_select_list( + table="state_group_edges", + keyvalues={"state_group": str(context.state_group_after_event)}, + retcols=("*",), + ) + ) + self.assertEqual( + context.state_group_before_event, groups[0].get("prev_state_group") + ) diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py index bc090ebce039..05dc4f64b847 100644 --- a/tests/storage/test_stream.py +++ b/tests/storage/test_stream.py @@ -16,7 +16,7 @@ from twisted.test.proto_helpers import MemoryReactor -from synapse.api.constants import EventTypes, RelationTypes +from synapse.api.constants import Direction, EventTypes, RelationTypes from synapse.api.filtering import Filter from synapse.rest import admin from synapse.rest.client import login, room @@ -128,7 +128,7 @@ def _filter_messages(self, filter: JsonDict) -> List[str]: room_id=self.room_id, from_key=self.from_token.room_key, to_key=None, - direction="f", + direction=Direction.FORWARDS, limit=10, event_filter=Filter(self.hs, filter), ) diff --git a/tests/storage/test_unsafe_locale.py b/tests/storage/test_unsafe_locale.py index ba53c2281869..19da8a9b09da 100644 --- a/tests/storage/test_unsafe_locale.py +++ b/tests/storage/test_unsafe_locale.py @@ -14,6 +14,7 @@ from unittest.mock import MagicMock, patch from synapse.storage.database import make_conn +from synapse.storage.engines import PostgresEngine from synapse.storage.engines._base import IncorrectDatabaseSetup from tests.unittest import HomeserverTestCase @@ -38,6 +39,7 @@ def test_unsafe_locale(self, mock_db_locale: MagicMock) -> None: def test_safe_locale(self) -> None: database = self.hs.get_datastores().databases[0] + assert isinstance(database.engine, PostgresEngine) db_conn = make_conn(database._database_config, database.engine, "test_unsafe") with db_conn.cursor() as txn: diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py index 
f1ca523d23f9..8c72aa17225a 100644 --- a/tests/storage/test_user_directory.py +++ b/tests/storage/test_user_directory.py @@ -25,6 +25,11 @@ from synapse.server import HomeServer from synapse.storage import DataStore from synapse.storage.background_updates import _BackgroundUpdateHandler +from synapse.storage.databases.main import user_directory +from synapse.storage.databases.main.user_directory import ( + _parse_words_with_icu, + _parse_words_with_regex, +) from synapse.storage.roommember import ProfileInfo from synapse.util import Clock @@ -42,7 +47,7 @@ BOB = "@bob:b" BOBBY = "@bobby:a" # The localpart isn't 'Bela' on purpose so we can test looking up display names. -BELA = "@somenickname:a" +BELA = "@somenickname:example.org" class GetUserDirectoryTables: @@ -423,6 +428,8 @@ async def mocked_process_users(*args: Any, **kwargs: Any) -> int: class UserDirectoryStoreTestCase(HomeserverTestCase): + use_icu = False + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main @@ -434,6 +441,12 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.get_success(self.store.update_profile_in_user_dir(BELA, "Bela", None)) self.get_success(self.store.add_users_in_public_rooms("!room:id", (ALICE, BOB))) + self._restore_use_icu = user_directory.USE_ICU + user_directory.USE_ICU = self.use_icu + + def tearDown(self) -> None: + user_directory.USE_ICU = self._restore_use_icu + def test_search_user_dir(self) -> None: # normally when alice searches the directory she should just find # bob because bobby doesn't share a room with her. @@ -478,6 +491,159 @@ def test_search_user_dir_stop_words(self) -> None: {"user_id": BELA, "display_name": "Bela", "avatar_url": None}, ) + @override_config({"user_directory": {"search_all_users": True}}) + def test_search_user_dir_start_of_user_id(self) -> None: + """Tests that a user can look up another user by searching for the start + of their user ID. + """ + r = self.get_success(self.store.search_user_dir(ALICE, "somenickname:exa", 10)) + self.assertFalse(r["limited"]) + self.assertEqual(1, len(r["results"])) + self.assertDictEqual( + r["results"][0], + {"user_id": BELA, "display_name": "Bela", "avatar_url": None}, + ) + + @override_config({"user_directory": {"search_all_users": True}}) + def test_search_user_dir_ascii_case_insensitivity(self) -> None: + """Tests that a user can look up another user by searching for their name in a + different case. + """ + CHARLIE = "@someuser:example.org" + self.get_success( + self.store.update_profile_in_user_dir(CHARLIE, "Charlie", None) + ) + + r = self.get_success(self.store.search_user_dir(ALICE, "cHARLIE", 10)) + self.assertFalse(r["limited"]) + self.assertEqual(1, len(r["results"])) + self.assertDictEqual( + r["results"][0], + {"user_id": CHARLIE, "display_name": "Charlie", "avatar_url": None}, + ) + + @override_config({"user_directory": {"search_all_users": True}}) + def test_search_user_dir_unicode_case_insensitivity(self) -> None: + """Tests that a user can look up another user by searching for their name in a + different case. 
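# A sketch, not part of the patch: the new directory-search tests around here
# cover case-insensitive matching across scripts. Cyrillic folds cleanly, but
# the dotted/dotless "i" pair does not, which is why the dotted/dotless test
# further down is marked as skipped. Plain Python, no Synapse code involved:
assert "Иван".casefold() == "иван"
# "İ".lower() becomes "i" plus a combining dot above (two code points), so a
# naive comparison against a plain "i" fails.
assert "İ".lower() != "i"
assert len("İ".lower()) == 2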
+ """ + IVAN = "@someuser:example.org" + self.get_success(self.store.update_profile_in_user_dir(IVAN, "Иван", None)) + + r = self.get_success(self.store.search_user_dir(ALICE, "иВАН", 10)) + self.assertFalse(r["limited"]) + self.assertEqual(1, len(r["results"])) + self.assertDictEqual( + r["results"][0], + {"user_id": IVAN, "display_name": "Иван", "avatar_url": None}, + ) + + @override_config({"user_directory": {"search_all_users": True}}) + def test_search_user_dir_dotted_dotless_i_case_insensitivity(self) -> None: + """Tests that a user can look up another user by searching for their name in a + different case, when their name contains dotted or dotless "i"s. + + Some languages have dotted and dotless versions of "i", which are considered to + be different letters: i <-> İ, ı <-> I. To make things difficult, they reuse the + ASCII "i" and "I" code points, despite having different lowercase / uppercase + forms. + """ + USER = "@someuser:example.org" + + expected_matches = [ + # (search_term, display_name) + # A search for "i" should match "İ". + ("iiiii", "İİİİİ"), + # A search for "I" should match "ı". + ("IIIII", "ııııı"), + # A search for "ı" should match "I". + ("ııııı", "IIIII"), + # A search for "İ" should match "i". + ("İİİİİ", "iiiii"), + ] + + for search_term, display_name in expected_matches: + self.get_success( + self.store.update_profile_in_user_dir(USER, display_name, None) + ) + + r = self.get_success(self.store.search_user_dir(ALICE, search_term, 10)) + self.assertFalse(r["limited"]) + self.assertEqual( + 1, + len(r["results"]), + f"searching for {search_term!r} did not match {display_name!r}", + ) + self.assertDictEqual( + r["results"][0], + {"user_id": USER, "display_name": display_name, "avatar_url": None}, + ) + + # We don't test for negative matches, to allow implementations that consider all + # the i variants to be the same. + + test_search_user_dir_dotted_dotless_i_case_insensitivity.skip = "not supported" # type: ignore + + @override_config({"user_directory": {"search_all_users": True}}) + def test_search_user_dir_unicode_normalization(self) -> None: + """Tests that a user can look up another user by searching for their name with + either composed or decomposed accents. + """ + AMELIE = "@someuser:example.org" + + expected_matches = [ + # (search_term, display_name) + ("Ame\u0301lie", "Amélie"), + ("Amélie", "Ame\u0301lie"), + ] + + for search_term, display_name in expected_matches: + self.get_success( + self.store.update_profile_in_user_dir(AMELIE, display_name, None) + ) + + r = self.get_success(self.store.search_user_dir(ALICE, search_term, 10)) + self.assertFalse(r["limited"]) + self.assertEqual( + 1, + len(r["results"]), + f"searching for {search_term!r} did not match {display_name!r}", + ) + self.assertDictEqual( + r["results"][0], + {"user_id": AMELIE, "display_name": display_name, "avatar_url": None}, + ) + + @override_config({"user_directory": {"search_all_users": True}}) + def test_search_user_dir_accent_insensitivity(self) -> None: + """Tests that a user can look up another user by searching for their name + without any accents. 
+ """ + AMELIE = "@someuser:example.org" + self.get_success(self.store.update_profile_in_user_dir(AMELIE, "Amélie", None)) + + r = self.get_success(self.store.search_user_dir(ALICE, "amelie", 10)) + self.assertFalse(r["limited"]) + self.assertEqual(1, len(r["results"])) + self.assertDictEqual( + r["results"][0], + {"user_id": AMELIE, "display_name": "Amélie", "avatar_url": None}, + ) + + # It may be desirable for "é"s in search terms to not match plain "e"s and we + # really don't want "é"s in search terms to match "e"s with different accents. + # But we don't test for this to allow implementations that consider all + # "e"-lookalikes to be the same. + + test_search_user_dir_accent_insensitivity.skip = "not supported yet" # type: ignore + + +class UserDirectoryStoreTestCaseWithIcu(UserDirectoryStoreTestCase): + use_icu = True + + if not icu: + skip = "Requires PyICU" + class UserDirectoryICUTestCase(HomeserverTestCase): if not icu: @@ -513,3 +679,33 @@ def test_icu_word_boundary(self) -> None: r["results"][0], {"user_id": ALICE, "display_name": display_name, "avatar_url": None}, ) + + def test_icu_word_boundary_punctuation(self) -> None: + """ + Tests the behaviour of punctuation with the ICU tokeniser. + + Seems to depend on underlying version of ICU. + """ + + # Note: either tokenisation is fine, because Postgres actually splits + # words itself afterwards. + self.assertIn( + _parse_words_with_icu("lazy'fox jumped:over the.dog"), + ( + # ICU 66 on Ubuntu 20.04 + ["lazy'fox", "jumped", "over", "the", "dog"], + # ICU 70 on Ubuntu 22.04 + ["lazy'fox", "jumped:over", "the.dog"], + # pyicu 2.10.2 on Alpine edge / macOS + ["lazy'fox", "jumped", "over", "the.dog"], + ), + ) + + def test_regex_word_boundary_punctuation(self) -> None: + """ + Tests the behaviour of punctuation with the non-ICU tokeniser + """ + self.assertEqual( + _parse_words_with_regex("lazy'fox jumped:over the.dog"), + ["lazy", "fox", "jumped", "over", "the", "dog"], + ) diff --git a/tests/test_distributor.py b/tests/test_distributor.py index 31546ea52bb0..a248f1d27711 100644 --- a/tests/test_distributor.py +++ b/tests/test_distributor.py @@ -21,10 +21,10 @@ class DistributorTestCase(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: self.dist = Distributor() - def test_signal_dispatch(self): + def test_signal_dispatch(self) -> None: self.dist.declare("alert") observer = Mock() @@ -33,7 +33,7 @@ def test_signal_dispatch(self): self.dist.fire("alert", 1, 2, 3) observer.assert_called_with(1, 2, 3) - def test_signal_catch(self): + def test_signal_catch(self) -> None: self.dist.declare("alarm") observers = [Mock() for i in (1, 2)] @@ -51,7 +51,7 @@ def test_signal_catch(self): self.assertEqual(mock_logger.warning.call_count, 1) self.assertIsInstance(mock_logger.warning.call_args[0][0], str) - def test_signal_prereg(self): + def test_signal_prereg(self) -> None: observer = Mock() self.dist.observe("flare", observer) @@ -60,8 +60,8 @@ def test_signal_prereg(self): observer.assert_called_with(4, 5) - def test_signal_undeclared(self): - def code(): + def test_signal_undeclared(self) -> None: + def code() -> None: self.dist.fire("notification") self.assertRaises(KeyError, code) diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index 0a7937f1cc72..2860564afc45 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -31,13 +31,13 @@ class _StubEventSourceStore: """A stub implementation of the EventSourceStore""" - def __init__(self): + def __init__(self) -> None: self._store: Dict[str, 
EventBase] = {} - def add_event(self, event: EventBase): + def add_event(self, event: EventBase) -> None: self._store[event.event_id] = event - def add_events(self, events: Iterable[EventBase]): + def add_events(self, events: Iterable[EventBase]) -> None: for event in events: self._store[event.event_id] = event @@ -59,7 +59,7 @@ async def get_events( class EventAuthTestCase(unittest.TestCase): - def test_rejected_auth_events(self): + def test_rejected_auth_events(self) -> None: """ Events that refer to rejected events in their auth events are rejected """ @@ -109,7 +109,7 @@ def test_rejected_auth_events(self): ) ) - def test_create_event_with_prev_events(self): + def test_create_event_with_prev_events(self) -> None: """A create event with prev_events should be rejected https://spec.matrix.org/v1.3/rooms/v9/#authorization-rules @@ -150,7 +150,7 @@ def test_create_event_with_prev_events(self): event_auth.check_state_independent_auth_rules(event_store, bad_event) ) - def test_duplicate_auth_events(self): + def test_duplicate_auth_events(self) -> None: """Events with duplicate auth_events should be rejected https://spec.matrix.org/v1.3/rooms/v9/#authorization-rules @@ -196,7 +196,7 @@ def test_duplicate_auth_events(self): event_auth.check_state_independent_auth_rules(event_store, bad_event2) ) - def test_unexpected_auth_events(self): + def test_unexpected_auth_events(self) -> None: """Events with excess auth_events should be rejected https://spec.matrix.org/v1.3/rooms/v9/#authorization-rules @@ -236,7 +236,7 @@ def test_unexpected_auth_events(self): event_auth.check_state_independent_auth_rules(event_store, bad_event) ) - def test_random_users_cannot_send_state_before_first_pl(self): + def test_random_users_cannot_send_state_before_first_pl(self) -> None: """ Check that, before the first PL lands, the creator is the only user that can send a state event. @@ -263,7 +263,7 @@ def test_random_users_cannot_send_state_before_first_pl(self): auth_events, ) - def test_state_default_level(self): + def test_state_default_level(self) -> None: """ Check that users above the state_default level can send state and those below cannot @@ -298,7 +298,7 @@ def test_state_default_level(self): auth_events, ) - def test_alias_event(self): + def test_alias_event(self) -> None: """Alias events have special behavior up through room version 6.""" creator = "@creator:example.com" other = "@other:example.com" @@ -333,7 +333,7 @@ def test_alias_event(self): auth_events, ) - def test_msc2432_alias_event(self): + def test_msc2432_alias_event(self) -> None: """After MSC2432, alias events have no special behavior.""" creator = "@creator:example.com" other = "@other:example.com" @@ -366,7 +366,9 @@ def test_msc2432_alias_event(self): ) @parameterized.expand([(RoomVersions.V1, True), (RoomVersions.V6, False)]) - def test_notifications(self, room_version: RoomVersion, allow_modification: bool): + def test_notifications( + self, room_version: RoomVersion, allow_modification: bool + ) -> None: """ Notifications power levels get checked due to MSC2209. """ @@ -395,7 +397,7 @@ def test_notifications(self, room_version: RoomVersion, allow_modification: bool with self.assertRaises(AuthError): event_auth.check_state_dependent_auth_rules(pl_event, auth_events) - def test_join_rules_public(self): + def test_join_rules_public(self) -> None: """ Test joining a public room. 
""" @@ -460,7 +462,7 @@ def test_join_rules_public(self): auth_events.values(), ) - def test_join_rules_invite(self): + def test_join_rules_invite(self) -> None: """ Test joining an invite only room. """ @@ -835,7 +837,7 @@ def _power_levels_event( ) -def _alias_event(room_version: RoomVersion, sender: str, **kwargs) -> EventBase: +def _alias_event(room_version: RoomVersion, sender: str, **kwargs: Any) -> EventBase: data = { "room_id": TEST_ROOM_ID, **_maybe_get_event_id_dict_for_room_version(room_version), diff --git a/tests/test_federation.py b/tests/test_federation.py index fe2964d764d6..46d2f99eacc6 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -12,54 +12,48 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Collection, List, Optional, Union from unittest.mock import Mock -from twisted.internet.defer import succeed +from twisted.test.proto_helpers import MemoryReactor from synapse.api.errors import FederationError -from synapse.api.room_versions import RoomVersions -from synapse.events import make_event_from_dict +from synapse.api.room_versions import RoomVersion, RoomVersions +from synapse.events import EventBase, make_event_from_dict +from synapse.events.snapshot import EventContext from synapse.federation.federation_base import event_from_pdu_json +from synapse.handlers.device import DeviceListUpdater +from synapse.http.types import QueryParams from synapse.logging.context import LoggingContext -from synapse.types import UserID, create_requester +from synapse.server import HomeServer +from synapse.types import JsonDict, UserID, create_requester from synapse.util import Clock from synapse.util.retryutils import NotRetryingDestination from tests import unittest -from tests.server import ThreadedMemoryReactorClock, setup_test_homeserver from tests.test_utils import make_awaitable class MessageAcceptTests(unittest.HomeserverTestCase): - def setUp(self): - + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.http_client = Mock() - self.reactor = ThreadedMemoryReactorClock() - self.hs_clock = Clock(self.reactor) - self.reactor.advance(1) - self.homeserver = setup_test_homeserver( - self.addCleanup, - federation_http_client=self.http_client, - clock=self.hs_clock, - reactor=self.reactor, - ) + return self.setup_test_homeserver(federation_http_client=self.http_client) + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: user_id = UserID("us", "test") our_user = create_requester(user_id) - room_creator = self.homeserver.get_room_creation_handler() + room_creator = self.hs.get_room_creation_handler() self.room_id = self.get_success( room_creator.create_room( our_user, room_creator._presets_dict["public_chat"], ratelimit=False ) - )[0]["room_id"] + )[0] - self.store = self.homeserver.get_datastores().main + self.store = self.hs.get_datastores().main # Figure out what the most recent event is most_recent = self.get_success( - self.homeserver.get_datastores().main.get_latest_event_ids_in_room( - self.room_id - ) + self.hs.get_datastores().main.get_latest_event_ids_in_room(self.room_id) )[0] join_event = make_event_from_dict( @@ -79,17 +73,23 @@ def setUp(self): } ) - self.handler = self.homeserver.get_federation_handler() - federation_event_handler = self.homeserver.get_federation_event_handler() + self.handler = self.hs.get_federation_handler() + federation_event_handler = self.hs.get_federation_event_handler() - async def 
_check_event_auth(origin, event, context): + async def _check_event_auth( + origin: Optional[str], event: EventBase, context: EventContext + ) -> None: pass - federation_event_handler._check_event_auth = _check_event_auth - self.client = self.homeserver.get_federation_client() - self.client._check_sigs_and_hash_for_pulled_events_and_fetch = ( - lambda dest, pdus, **k: succeed(pdus) - ) + federation_event_handler._check_event_auth = _check_event_auth # type: ignore[assignment] + self.client = self.hs.get_federation_client() + + async def _check_sigs_and_hash_for_pulled_events_and_fetch( + dest: str, pdus: Collection[EventBase], room_version: RoomVersion + ) -> List[EventBase]: + return list(pdus) + + self.client._check_sigs_and_hash_for_pulled_events_and_fetch = _check_sigs_and_hash_for_pulled_events_and_fetch # type: ignore[assignment] # Send the join, it should return None (which is not an error) self.assertEqual( @@ -105,16 +105,25 @@ async def _check_event_auth(origin, event, context): "$join:test.serv", ) - def test_cant_hide_direct_ancestors(self): + def test_cant_hide_direct_ancestors(self) -> None: """ If you send a message, you must be able to provide the direct prev_events that said event references. """ - async def post_json(destination, path, data, headers=None, timeout=0): + async def post_json( + destination: str, + path: str, + data: Optional[JsonDict] = None, + long_retries: bool = False, + timeout: Optional[int] = None, + ignore_backoff: bool = False, + args: Optional[QueryParams] = None, + ) -> Union[JsonDict, list]: # If it asks us for new missing events, give them NOTHING if path.startswith("/_matrix/federation/v1/get_missing_events/"): return {"events": []} + return {} self.http_client.post_json = post_json @@ -139,7 +148,7 @@ async def post_json(destination, path, data, headers=None, timeout=0): } ) - federation_event_handler = self.homeserver.get_federation_event_handler() + federation_event_handler = self.hs.get_federation_event_handler() with LoggingContext("test-context"): failure = self.get_failure( federation_event_handler.on_receive_pdu("test.serv", lying_event), @@ -159,7 +168,7 @@ async def post_json(destination, path, data, headers=None, timeout=0): extrem = self.get_success(self.store.get_latest_event_ids_in_room(self.room_id)) self.assertEqual(extrem[0], "$join:test.serv") - def test_retry_device_list_resync(self): + def test_retry_device_list_resync(self) -> None: """Tests that device lists are marked as stale if they couldn't be synced, and that stale device lists are retried periodically. """ @@ -172,24 +181,27 @@ def test_retry_device_list_resync(self): # When this function is called, increment the number of resync attempts (only if # we're querying devices for the right user ID), then raise a # NotRetryingDestination error to fail the resync gracefully. - def query_user_devices(destination, user_id): + def query_user_devices( + destination: str, user_id: str, timeout: int = 30000 + ) -> JsonDict: if user_id == remote_user_id: self.resync_attempts += 1 raise NotRetryingDestination(0, 0, destination) # Register the mock on the federation client. 
- federation_client = self.homeserver.get_federation_client() - federation_client.query_user_devices = Mock(side_effect=query_user_devices) + federation_client = self.hs.get_federation_client() + federation_client.query_user_devices = Mock(side_effect=query_user_devices) # type: ignore[assignment] # Register a mock on the store so that the incoming update doesn't fail because # we don't share a room with the user. - store = self.homeserver.get_datastores().main + store = self.hs.get_datastores().main store.get_rooms_for_user = Mock(return_value=make_awaitable(["!someroom:test"])) # Manually inject a fake device list update. We need this update to include at # least one prev_id so that the user's device list will need to be retried. - device_list_updater = self.homeserver.get_device_handler().device_list_updater + device_list_updater = self.hs.get_device_handler().device_list_updater + assert isinstance(device_list_updater, DeviceListUpdater) self.get_success( device_list_updater.incoming_device_list_update( origin=remote_origin, @@ -219,7 +231,7 @@ def query_user_devices(destination, user_id): self.reactor.advance(30) self.assertEqual(self.resync_attempts, 2) - def test_cross_signing_keys_retry(self): + def test_cross_signing_keys_retry(self) -> None: """Tests that resyncing a device list correctly processes cross-signing keys from the remote server. """ @@ -228,8 +240,8 @@ def test_cross_signing_keys_retry(self): remote_self_signing_key = "QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ" # Register mock device list retrieval on the federation client. - federation_client = self.homeserver.get_federation_client() - federation_client.query_user_devices = Mock( + federation_client = self.hs.get_federation_client() + federation_client.query_user_devices = Mock( # type: ignore[assignment] return_value=make_awaitable( { "user_id": remote_user_id, @@ -253,7 +265,7 @@ def test_cross_signing_keys_retry(self): ) # Resync the device list. - device_handler = self.homeserver.get_device_handler() + device_handler = self.hs.get_device_handler() self.get_success( device_handler.device_list_updater.user_device_resync(remote_user_id), ) @@ -262,16 +274,18 @@ def test_cross_signing_keys_retry(self): keys = self.get_success( self.store.get_e2e_cross_signing_keys_bulk(user_ids=[remote_user_id]), ) - self.assertTrue(remote_user_id in keys) + self.assertIn(remote_user_id, keys) + key = keys[remote_user_id] + assert key is not None # Check that the master key is the one returned by the mock. - master_key = keys[remote_user_id]["master"] + master_key = key["master"] self.assertEqual(len(master_key["keys"]), 1) self.assertTrue("ed25519:" + remote_master_key in master_key["keys"].keys()) self.assertTrue(remote_master_key in master_key["keys"].values()) # Check that the self-signing key is the one returned by the mock. 
- self_signing_key = keys[remote_user_id]["self_signing"] + self_signing_key = key["self_signing"] self.assertEqual(len(self_signing_key["keys"]), 1) self.assertTrue( "ed25519:" + remote_self_signing_key in self_signing_key["keys"].keys(), @@ -280,7 +294,7 @@ def test_cross_signing_keys_retry(self): class StripUnsignedFromEventsTestCase(unittest.TestCase): - def test_strip_unauthorized_unsigned_values(self): + def test_strip_unauthorized_unsigned_values(self) -> None: event1 = { "sender": "@baduser:test.serv", "state_key": "@baduser:test.serv", @@ -297,7 +311,7 @@ def test_strip_unauthorized_unsigned_values(self): # Make sure unauthorized fields are stripped from unsigned self.assertNotIn("more warez", filtered_event.unsigned) - def test_strip_event_maintains_allowed_fields(self): + def test_strip_event_maintains_allowed_fields(self) -> None: event2 = { "sender": "@baduser:test.serv", "state_key": "@baduser:test.serv", @@ -324,7 +338,7 @@ def test_strip_event_maintains_allowed_fields(self): self.assertIn("invite_room_state", filtered_event2.unsigned) self.assertEqual([], filtered_event2.unsigned["invite_room_state"]) - def test_strip_event_removes_fields_based_on_event_type(self): + def test_strip_event_removes_fields_based_on_event_type(self) -> None: event3 = { "sender": "@baduser:test.serv", "state_key": "@baduser:test.serv", diff --git a/tests/test_mau.py b/tests/test_mau.py index f14fcb7db9ba..ff21098a5965 100644 --- a/tests/test_mau.py +++ b/tests/test_mau.py @@ -14,12 +14,17 @@ """Tests REST events for /rooms paths.""" -from typing import List +from typing import List, Optional + +from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import APP_SERVICE_REGISTRATION_TYPE, LoginType from synapse.api.errors import Codes, HttpResponseException, SynapseError from synapse.appservice import ApplicationService from synapse.rest.client import register, sync +from synapse.server import HomeServer +from synapse.types import JsonDict +from synapse.util import Clock from tests import unittest from tests.unittest import override_config @@ -27,10 +32,9 @@ class TestMauLimit(unittest.HomeserverTestCase): - servlets = [register.register_servlets, sync.register_servlets] - def default_config(self): + def default_config(self) -> JsonDict: config = default_config("test") config.update( @@ -53,10 +57,12 @@ def default_config(self): return config - def prepare(self, reactor, clock, homeserver): + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer + ) -> None: self.store = homeserver.get_datastores().main - def test_simple_deny_mau(self): + def test_simple_deny_mau(self) -> None: # Create and sync so that the MAU counts get updated token1 = self.create_user("kermit1") self.do_sync_for_user(token1) @@ -75,7 +81,7 @@ def test_simple_deny_mau(self): self.assertEqual(e.code, 403) self.assertEqual(e.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) - def test_as_ignores_mau(self): + def test_as_ignores_mau(self) -> None: """Test that application services can still create users when the MAU limit has been reached. This only works when application service user ip tracking is disabled. 
@@ -113,7 +119,7 @@ def test_as_ignores_mau(self): self.create_user("as_kermit4", token=as_token, appservice=True) - def test_allowed_after_a_month_mau(self): + def test_allowed_after_a_month_mau(self) -> None: # Create and sync so that the MAU counts get updated token1 = self.create_user("kermit1") self.do_sync_for_user(token1) @@ -132,7 +138,7 @@ def test_allowed_after_a_month_mau(self): self.do_sync_for_user(token3) @override_config({"mau_trial_days": 1}) - def test_trial_delay(self): + def test_trial_delay(self) -> None: # We should be able to register more than the limit initially token1 = self.create_user("kermit1") self.do_sync_for_user(token1) @@ -165,7 +171,7 @@ def test_trial_delay(self): self.assertEqual(e.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) @override_config({"mau_trial_days": 1}) - def test_trial_users_cant_come_back(self): + def test_trial_users_cant_come_back(self) -> None: self.hs.config.server.mau_trial_days = 1 # We should be able to register more than the limit initially @@ -216,7 +222,7 @@ def test_trial_users_cant_come_back(self): # max_mau_value should not matter {"max_mau_value": 1, "limit_usage_by_mau": False, "mau_stats_only": True} ) - def test_tracked_but_not_limited(self): + def test_tracked_but_not_limited(self) -> None: # Simply being able to create 2 users indicates that the # limit was not reached. token1 = self.create_user("kermit1") @@ -236,10 +242,10 @@ def test_tracked_but_not_limited(self): "mau_appservice_trial_days": {"SomeASID": 1, "AnotherASID": 2}, } ) - def test_as_trial_days(self): + def test_as_trial_days(self) -> None: user_tokens: List[str] = [] - def advance_time_and_sync(): + def advance_time_and_sync() -> None: self.reactor.advance(24 * 60 * 61) for token in user_tokens: self.do_sync_for_user(token) @@ -300,7 +306,9 @@ def advance_time_and_sync(): }, ) - def create_user(self, localpart, token=None, appservice=False): + def create_user( + self, localpart: str, token: Optional[str] = None, appservice: bool = False + ) -> str: request_data = { "username": localpart, "password": "monkey", @@ -326,7 +334,7 @@ def create_user(self, localpart, token=None, appservice=False): return access_token - def do_sync_for_user(self, token): + def do_sync_for_user(self, token: str) -> None: channel = self.make_request("GET", "/sync", access_token=token) if channel.code != 200: diff --git a/tests/test_phone_home.py b/tests/test_phone_home.py index cc1a98f1c4ab..3f899b0d9116 100644 --- a/tests/test_phone_home.py +++ b/tests/test_phone_home.py @@ -33,7 +33,7 @@ def test_performance_frozen_clock(self) -> None: If time doesn't move, don't error out. 
""" past_stats = [ - (self.hs.get_clock().time(), resource.getrusage(resource.RUSAGE_SELF)) + (int(self.hs.get_clock().time()), resource.getrusage(resource.RUSAGE_SELF)) ] stats: JsonDict = {} self.get_success(phone_stats_home(self.hs, stats, past_stats)) diff --git a/tests/test_rust.py b/tests/test_rust.py index 55d8b6b28cb4..67443b628042 100644 --- a/tests/test_rust.py +++ b/tests/test_rust.py @@ -6,6 +6,6 @@ class RustTestCase(unittest.TestCase): """Basic tests to ensure that we can call into Rust code.""" - def test_basic(self): + def test_basic(self) -> None: result = sum_as_string(1, 2) self.assertEqual("3", result) diff --git a/tests/test_server.py b/tests/test_server.py index d67d7722a485..e266c06a2cbf 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -266,6 +266,10 @@ def _check_cors_standard_headers(self, channel: FakeChannel) -> None: [b"X-Requested-With, Content-Type, Authorization, Date"], "has correct CORS Headers header", ) + self.assertEqual( + channel.headers.getRawHeaders(b"Access-Control-Expose-Headers"), + [b"Synapse-Trace-Id"], + ) def _check_cors_msc3886_headers(self, channel: FakeChannel) -> None: # Ensure the correct CORS headers have been added diff --git a/tests/test_test_utils.py b/tests/test_test_utils.py index d04bcae0fabe..5cd698147e12 100644 --- a/tests/test_test_utils.py +++ b/tests/test_test_utils.py @@ -17,25 +17,25 @@ class MockClockTestCase(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: self.clock = MockClock() - def test_advance_time(self): + def test_advance_time(self) -> None: start_time = self.clock.time() self.clock.advance_time(20) self.assertEqual(20, self.clock.time() - start_time) - def test_later(self): + def test_later(self) -> None: invoked = [0, 0] - def _cb0(): + def _cb0() -> None: invoked[0] = 1 self.clock.call_later(10, _cb0) - def _cb1(): + def _cb1() -> None: invoked[1] = 1 self.clock.call_later(20, _cb1) @@ -51,15 +51,15 @@ def _cb1(): self.assertTrue(invoked[1]) - def test_cancel_later(self): + def test_cancel_later(self) -> None: invoked = [0, 0] - def _cb0(): + def _cb0() -> None: invoked[0] = 1 t0 = self.clock.call_later(10, _cb0) - def _cb1(): + def _cb1() -> None: invoked[1] = 1 self.clock.call_later(20, _cb1) diff --git a/tests/test_types.py b/tests/test_types.py index 111116938423..c491cc9a9661 100644 --- a/tests/test_types.py +++ b/tests/test_types.py @@ -43,34 +43,34 @@ def test_two_colons(self) -> None: class UserIDTestCase(unittest.HomeserverTestCase): - def test_parse(self): + def test_parse(self) -> None: user = UserID.from_string("@1234abcd:test") self.assertEqual("1234abcd", user.localpart) self.assertEqual("test", user.domain) self.assertEqual(True, self.hs.is_mine(user)) - def test_parse_rejects_empty_id(self): + def test_parse_rejects_empty_id(self) -> None: with self.assertRaises(SynapseError): UserID.from_string("") - def test_parse_rejects_missing_sigil(self): + def test_parse_rejects_missing_sigil(self) -> None: with self.assertRaises(SynapseError): UserID.from_string("alice:example.com") - def test_parse_rejects_missing_separator(self): + def test_parse_rejects_missing_separator(self) -> None: with self.assertRaises(SynapseError): UserID.from_string("@alice.example.com") - def test_validation_rejects_missing_domain(self): + def test_validation_rejects_missing_domain(self) -> None: self.assertFalse(UserID.is_valid("@alice:")) - def test_build(self): + def test_build(self) -> None: user = UserID("5678efgh", "my.domain") self.assertEqual(user.to_string(), "@5678efgh:my.domain") - 
def test_compare(self): + def test_compare(self) -> None: userA = UserID.from_string("@userA:my.domain") userAagain = UserID.from_string("@userA:my.domain") userB = UserID.from_string("@userB:my.domain") @@ -80,43 +80,43 @@ def test_compare(self): class RoomAliasTestCase(unittest.HomeserverTestCase): - def test_parse(self): + def test_parse(self) -> None: room = RoomAlias.from_string("#channel:test") self.assertEqual("channel", room.localpart) self.assertEqual("test", room.domain) self.assertEqual(True, self.hs.is_mine(room)) - def test_build(self): + def test_build(self) -> None: room = RoomAlias("channel", "my.domain") self.assertEqual(room.to_string(), "#channel:my.domain") - def test_validate(self): + def test_validate(self) -> None: id_string = "#test:domain,test" self.assertFalse(RoomAlias.is_valid(id_string)) class MapUsernameTestCase(unittest.TestCase): - def testPassThrough(self): + def test_pass_through(self) -> None: self.assertEqual(map_username_to_mxid_localpart("test1234"), "test1234") - def testUpperCase(self): + def test_upper_case(self) -> None: self.assertEqual(map_username_to_mxid_localpart("tEST_1234"), "test_1234") self.assertEqual( map_username_to_mxid_localpart("tEST_1234", case_sensitive=True), "t_e_s_t__1234", ) - def testSymbols(self): + def test_symbols(self) -> None: self.assertEqual( map_username_to_mxid_localpart("test=$?_1234"), "test=3d=24=3f_1234" ) - def testLeadingUnderscore(self): + def test_leading_underscore(self) -> None: self.assertEqual(map_username_to_mxid_localpart("_test_1234"), "=5ftest_1234") - def testNonAscii(self): + def test_non_ascii(self) -> None: # this should work with either a unicode or a bytes self.assertEqual(map_username_to_mxid_localpart("têst"), "t=c3=aast") self.assertEqual(map_username_to_mxid_localpart("têst".encode()), "t=c3=aast") diff --git a/tests/test_utils/__init__.py b/tests/test_utils/__init__.py index e62ebcc6a5a3..e5dae670a70e 100644 --- a/tests/test_utils/__init__.py +++ b/tests/test_utils/__init__.py @@ -20,12 +20,13 @@ import warnings from asyncio import Future from binascii import unhexlify -from typing import Awaitable, Callable, Tuple, TypeVar +from typing import TYPE_CHECKING, Any, Awaitable, Callable, Optional, Tuple, TypeVar from unittest.mock import Mock import attr import zope.interface +from twisted.internet.interfaces import IProtocol from twisted.python.failure import Failure from twisted.web.client import ResponseDone from twisted.web.http import RESPONSES @@ -34,6 +35,9 @@ from synapse.types import JsonDict +if TYPE_CHECKING: + from sys import UnraisableHookArgs + TV = TypeVar("TV") @@ -78,25 +82,29 @@ def setup_awaitable_errors() -> Callable[[], None]: unraisable_exceptions = [] orig_unraisablehook = sys.unraisablehook - def unraisablehook(unraisable): + def unraisablehook(unraisable: "UnraisableHookArgs") -> None: unraisable_exceptions.append(unraisable.exc_value) - def cleanup(): + def cleanup() -> None: """ A method to be used as a clean-up that fails a test-case if there are any new unraisable exceptions.
""" sys.unraisablehook = orig_unraisablehook if unraisable_exceptions: - raise unraisable_exceptions.pop() + exc = unraisable_exceptions.pop() + assert exc is not None + raise exc sys.unraisablehook = unraisablehook return cleanup -def simple_async_mock(return_value=None, raises=None) -> Mock: +def simple_async_mock( + return_value: Optional[TV] = None, raises: Optional[Exception] = None +) -> Mock: # AsyncMock is not available in python3.5, this mimics part of its behaviour - async def cb(*args, **kwargs): + async def cb(*args: Any, **kwargs: Any) -> Optional[TV]: if raises: raise raises return return_value @@ -125,14 +133,14 @@ class FakeResponse: # type: ignore[misc] headers: Headers = attr.Factory(Headers) @property - def phrase(self): + def phrase(self) -> bytes: return RESPONSES.get(self.code, b"Unknown Status") @property - def length(self): + def length(self) -> int: return len(self.body) - def deliverBody(self, protocol): + def deliverBody(self, protocol: IProtocol) -> None: protocol.dataReceived(self.body) protocol.connectionLost(Failure(ResponseDone())) diff --git a/tests/test_utils/event_injection.py b/tests/test_utils/event_injection.py index 8027c7a856e2..9679904c3321 100644 --- a/tests/test_utils/event_injection.py +++ b/tests/test_utils/event_injection.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import List, Optional, Tuple +from typing import Any, List, Optional, Tuple import synapse.server from synapse.api.constants import EventTypes @@ -32,7 +32,7 @@ async def inject_member_event( membership: str, target: Optional[str] = None, extra_content: Optional[dict] = None, - **kwargs, + **kwargs: Any, ) -> EventBase: """Inject a membership event into a room.""" if target is None: @@ -57,7 +57,7 @@ async def inject_event( hs: synapse.server.HomeServer, room_version: Optional[str] = None, prev_event_ids: Optional[List[str]] = None, - **kwargs, + **kwargs: Any, ) -> EventBase: """Inject a generic event into a room @@ -82,7 +82,7 @@ async def create_event( hs: synapse.server.HomeServer, room_version: Optional[str] = None, prev_event_ids: Optional[List[str]] = None, - **kwargs, + **kwargs: Any, ) -> Tuple[EventBase, EventContext]: if room_version is None: room_version = await hs.get_datastores().main.get_room_version_id( @@ -92,8 +92,44 @@ async def create_event( builder = hs.get_event_builder_factory().for_room_version( KNOWN_ROOM_VERSIONS[room_version], kwargs ) - event, context = await hs.get_event_creation_handler().create_new_client_event( + ( + event, + unpersisted_context, + ) = await hs.get_event_creation_handler().create_new_client_event( builder, prev_event_ids=prev_event_ids ) + context = await unpersisted_context.persist(event) + return event, context + + +async def mark_event_as_partial_state( + hs: synapse.server.HomeServer, + event_id: str, + room_id: str, +) -> None: + """ + (Falsely) mark an event as having partial state. + + Naughty, but occasionally useful when checking that partial state doesn't + block something from happening. + + If the event already has partial state, this insert will fail (event_id is unique + in this table). 
+ """ + store = hs.get_datastores().main + await store.db_pool.simple_upsert( + table="partial_state_rooms", + keyvalues={"room_id": room_id}, + values={}, + insertion_values={"room_id": room_id}, + ) + + await store.db_pool.simple_insert( + table="partial_state_events", + values={ + "room_id": room_id, + "event_id": event_id, + }, + ) diff --git a/tests/test_utils/html_parsers.py b/tests/test_utils/html_parsers.py index e878af5f12e7..189c697efbee 100644 --- a/tests/test_utils/html_parsers.py +++ b/tests/test_utils/html_parsers.py @@ -13,13 +13,13 @@ # limitations under the License. from html.parser import HTMLParser -from typing import Dict, Iterable, List, Optional, Tuple +from typing import Dict, Iterable, List, NoReturn, Optional, Tuple class TestHtmlParser(HTMLParser): """A generic HTML page parser which extracts useful things from the HTML""" - def __init__(self): + def __init__(self) -> None: super().__init__() # a list of links found in the doc @@ -48,5 +48,5 @@ def handle_starttag( assert input_name self.hiddens[input_name] = attr_dict["value"] - def error(_, message): + def error(self, message: str) -> NoReturn: raise AssertionError(message) diff --git a/tests/test_utils/logging_setup.py b/tests/test_utils/logging_setup.py index 304c7b98c5c9..b522163a3444 100644 --- a/tests/test_utils/logging_setup.py +++ b/tests/test_utils/logging_setup.py @@ -25,7 +25,7 @@ class ToTwistedHandler(logging.Handler): tx_log = twisted.logger.Logger() - def emit(self, record): + def emit(self, record: logging.LogRecord) -> None: log_entry = self.format(record) log_level = record.levelname.lower().replace("warning", "warn") self.tx_log.emit( @@ -33,7 +33,7 @@ def emit(self, record): ) -def setup_logging(): +def setup_logging() -> None: """Configure the python logging appropriately for the tests. (Logs will end up in _trial_temp.) diff --git a/tests/test_utils/oidc.py b/tests/test_utils/oidc.py index 1461d23ee823..d555b242555d 100644 --- a/tests/test_utils/oidc.py +++ b/tests/test_utils/oidc.py @@ -14,7 +14,7 @@ import json -from typing import Any, Dict, List, Optional, Tuple +from typing import Any, ContextManager, Dict, List, Optional, Tuple from unittest.mock import Mock, patch from urllib.parse import parse_qs @@ -77,14 +77,14 @@ def __init__(self, clock: Clock, issuer: str): self._id_token_overrides: Dict[str, Any] = {} - def reset_mocks(self): + def reset_mocks(self) -> None: self.request.reset_mock() self.get_jwks_handler.reset_mock() self.get_metadata_handler.reset_mock() self.get_userinfo_handler.reset_mock() self.post_token_handler.reset_mock() - def patch_homeserver(self, hs: HomeServer): + def patch_homeserver(self, hs: HomeServer) -> ContextManager[Mock]: """Patch the ``HomeServer`` HTTP client to handle requests through the ``FakeOidcServer``. This patch should be used whenever the HS is expected to perform request to the @@ -188,7 +188,7 @@ def generate_logout_token(self, grant: FakeAuthorizationGrant) -> str: return self._sign(logout_token) - def id_token_override(self, overrides: dict): + def id_token_override(self, overrides: dict) -> ContextManager[dict]: """Temporarily patch the ID token generated by the token endpoint.""" return patch.object(self, "_id_token_overrides", overrides) @@ -247,7 +247,7 @@ def buggy_endpoint( metadata: bool = False, token: bool = False, userinfo: bool = False, - ): + ) -> ContextManager[Dict[str, Mock]]: """A context which makes a set of endpoints return a 500 error. 
Args: diff --git a/tests/test_visibility.py b/tests/test_visibility.py index 76d7484b2936..0e98372708d0 100644 --- a/tests/test_visibility.py +++ b/tests/test_visibility.py @@ -36,6 +36,8 @@ def setUp(self) -> None: self.event_builder_factory = self.hs.get_event_builder_factory() self._storage_controllers = self.hs.get_storage_controllers() self.clock._reactor.advance(1) # type: ignore + assert self._storage_controllers.persistence is not None + self._persistence = self._storage_controllers.persistence self.get_success(create_room(self.hs, TEST_ROOM_ID, "@someone:ROOM")) @@ -62,7 +64,13 @@ def test_filtering(self) -> None: filtered = self.get_success( filter_events_for_server( - self._storage_controllers, "test_server", "hs", events_to_filter + self._storage_controllers, + "test_server", + "hs", + events_to_filter, + redact=True, + filter_out_erased_senders=True, + filter_out_remote_partial_state_events=True, ) ) @@ -84,7 +92,13 @@ def test_filter_outlier(self) -> None: self.assertEqual( self.get_success( filter_events_for_server( - self._storage_controllers, "remote_hs", "hs", [outlier] + self._storage_controllers, + "remote_hs", + "hs", + [outlier], + redact=True, + filter_out_erased_senders=True, + filter_out_remote_partial_state_events=True, ) ), [outlier], @@ -95,7 +109,13 @@ def test_filter_outlier(self) -> None: filtered = self.get_success( filter_events_for_server( - self._storage_controllers, "remote_hs", "local_hs", [outlier, evt] + self._storage_controllers, + "remote_hs", + "local_hs", + [outlier, evt], + redact=True, + filter_out_erased_senders=True, + filter_out_remote_partial_state_events=True, ) ) self.assertEqual(len(filtered), 2, f"expected 2 results, got: {filtered}") @@ -107,7 +127,13 @@ def test_filter_outlier(self) -> None: # be redacted) filtered = self.get_success( filter_events_for_server( - self._storage_controllers, "other_server", "local_hs", [outlier, evt] + self._storage_controllers, + "other_server", + "local_hs", + [outlier, evt], + redact=True, + filter_out_erased_senders=True, + filter_out_remote_partial_state_events=True, ) ) self.assertEqual(filtered[0], outlier) @@ -142,7 +168,13 @@ def test_erased_user(self) -> None: # ... and the filtering happens. 
filtered = self.get_success( filter_events_for_server( - self._storage_controllers, "test_server", "local_hs", events_to_filter + self._storage_controllers, + "test_server", + "local_hs", + events_to_filter, + redact=True, + filter_out_erased_senders=True, + filter_out_remote_partial_state_events=True, ) ) @@ -176,12 +208,11 @@ def _inject_visibility(self, user_id: str, visibility: str) -> EventBase: }, ) - event, context = self.get_success( + event, unpersisted_context = self.get_success( self.event_creation_handler.create_new_client_event(builder) ) - self.get_success( - self._storage_controllers.persistence.persist_event(event, context) - ) + context = self.get_success(unpersisted_context.persist(event)) + self.get_success(self._persistence.persist_event(event, context)) return event def _inject_room_member( @@ -203,13 +234,12 @@ def _inject_room_member( }, ) - event, context = self.get_success( + event, unpersisted_context = self.get_success( self.event_creation_handler.create_new_client_event(builder) ) + context = self.get_success(unpersisted_context.persist(event)) - self.get_success( - self._storage_controllers.persistence.persist_event(event, context) - ) + self.get_success(self._persistence.persist_event(event, context)) return event def _inject_message( @@ -227,13 +257,12 @@ def _inject_message( }, ) - event, context = self.get_success( + event, unpersisted_context = self.get_success( self.event_creation_handler.create_new_client_event(builder) ) + context = self.get_success(unpersisted_context.persist(event)) - self.get_success( - self._storage_controllers.persistence.persist_event(event, context) - ) + self.get_success(self._persistence.persist_event(event, context)) return event def _inject_outlier(self) -> EventBase: @@ -251,7 +280,7 @@ def _inject_outlier(self) -> EventBase: event = self.get_success(builder.build(prev_event_ids=[], auth_event_ids=[])) event.internal_metadata.outlier = True self.get_success( - self._storage_controllers.persistence.persist_event( + self._persistence.persist_event( event, EventContext.for_outlier(self._storage_controllers) ) ) @@ -263,7 +292,7 @@ def setUp(self) -> None: super(FilterEventsForClientTestCase, self).setUp() self.clock._reactor.advance(1) # type: ignore - def test_out_of_band_invite_rejection(self): + def test_out_of_band_invite_rejection(self) -> None: # this is where we have received an invite event over federation, and then # rejected it. 
invite_pdu = { diff --git a/tests/unittest.py b/tests/unittest.py index fa92dd94eb6a..f9160faa1d0c 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -45,7 +45,7 @@ from twisted.internet.defer import Deferred, ensureDeferred from twisted.python.failure import Failure from twisted.python.threadpool import ThreadPool -from twisted.test.proto_helpers import MemoryReactor +from twisted.test.proto_helpers import MemoryReactor, MemoryReactorClock from twisted.trial import unittest from twisted.web.resource import Resource from twisted.web.server import Request @@ -82,7 +82,7 @@ ) from tests.test_utils import event_injection, setup_awaitable_errors from tests.test_utils.logging_setup import setup_logging -from tests.utils import default_config, setupdb +from tests.utils import checked_cast, default_config, setupdb setupdb() setup_logging() @@ -296,7 +296,12 @@ def setUp(self) -> None: from tests.rest.client.utils import RestHelper - self.helper = RestHelper(self.hs, self.site, getattr(self, "user_id", None)) + self.helper = RestHelper( + self.hs, + checked_cast(MemoryReactorClock, self.hs.get_reactor()), + self.site, + getattr(self, "user_id", None), + ) if hasattr(self, "user_id"): if self.hijack_auth: @@ -315,7 +320,7 @@ def setUp(self) -> None: # This has to be a function and not just a Mock, because # `self.helper.auth_user_id` is temporarily reassigned in some tests - async def get_requester(*args, **kwargs) -> Requester: + async def get_requester(*args: Any, **kwargs: Any) -> Requester: assert self.helper.auth_user_id is not None return create_requester( user_id=UserID.from_string(self.helper.auth_user_id), @@ -361,7 +366,9 @@ def wait_for_background_updates(self) -> None: store.db_pool.updates.do_next_background_update(False), by=0.1 ) - def make_homeserver(self, reactor: ThreadedMemoryReactorClock, clock: Clock): + def make_homeserver( + self, reactor: ThreadedMemoryReactorClock, clock: Clock + ) -> HomeServer: """ Make and return a homeserver. 
@@ -716,7 +723,7 @@ def create_and_send_event( event_creator = self.hs.get_event_creation_handler() requester = create_requester(user) - event, context = self.get_success( + event, unpersisted_context = self.get_success( event_creator.create_event( requester, { @@ -728,7 +735,7 @@ def create_and_send_event( prev_event_ids=prev_event_ids, ) ) - + context = self.get_success(unpersisted_context.persist(event)) if soft_failed: event.internal_metadata.soft_failed = True diff --git a/tests/util/test_retryutils.py b/tests/util/test_retryutils.py index 9529ee53c8a2..5f8f4e76b544 100644 --- a/tests/util/test_retryutils.py +++ b/tests/util/test_retryutils.py @@ -54,6 +54,7 @@ def test_limiter(self) -> None: self.pump() new_timings = self.get_success(store.get_destination_retry_timings("test_dest")) + assert new_timings is not None self.assertEqual(new_timings.failure_ts, failure_ts) self.assertEqual(new_timings.retry_last_ts, failure_ts) self.assertEqual(new_timings.retry_interval, MIN_RETRY_INTERVAL) @@ -82,6 +83,7 @@ def test_limiter(self) -> None: self.pump() new_timings = self.get_success(store.get_destination_retry_timings("test_dest")) + assert new_timings is not None self.assertEqual(new_timings.failure_ts, failure_ts) self.assertEqual(new_timings.retry_last_ts, retry_ts) self.assertGreaterEqual( diff --git a/tests/utils.py b/tests/utils.py index d76bf9716ad2..a0ac11bc5cd2 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -15,7 +15,7 @@ import atexit import os -from typing import Any, Callable, Dict, List, Tuple, Union, overload +from typing import Any, Callable, Dict, List, Tuple, Type, TypeVar, Union, overload import attr from typing_extensions import Literal, ParamSpec @@ -335,6 +335,33 @@ async def create_room(hs: HomeServer, room_id: str, creator_id: str) -> None: }, ) - event, context = await event_creation_handler.create_new_client_event(builder) + event, unpersisted_context = await event_creation_handler.create_new_client_event( + builder + ) + context = await unpersisted_context.persist(event) await persistence_store.persist_event(event, context) + + +T = TypeVar("T") + + +def checked_cast(type: Type[T], x: object) -> T: + """A version of typing.cast that is checked at runtime. + + We have our own function for this for two reasons: + + 1. typing.cast itself is deliberately a no-op at runtime, see + https://docs.python.org/3/library/typing.html#typing.cast + 2. To help workaround a mypy-zope bug https://github.com/Shoobx/mypy-zope/issues/91 + where mypy would erroneously consider `isinstance(x, type)` to be false in all + circumstances. + + For this to make sense, `T` needs to be something that `isinstance` can check; see + https://docs.python.org/3/library/functions.html?highlight=isinstance#isinstance + https://docs.python.org/3/glossary.html#term-abstract-base-class + https://docs.python.org/3/library/typing.html#typing.runtime_checkable + for more details. + """ + assert isinstance(x, type) + return x
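As a reading aid for the new `checked_cast` helper above, here is a minimal sketch of the kind of call site the patch introduces in `tests/unittest.py`; the wrapper function and its name are illustrative, not part of the patch.

```python
from twisted.test.proto_helpers import MemoryReactorClock

from synapse.server import HomeServer
from tests.utils import checked_cast


def get_memory_reactor(hs: HomeServer) -> MemoryReactorClock:
    # Unlike typing.cast (a no-op at runtime), checked_cast both narrows the
    # static type and asserts it at runtime, which also sidesteps the
    # mypy-zope isinstance() issue mentioned in the helper's docstring.
    return checked_cast(MemoryReactorClock, hs.get_reactor())
```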
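Several hunks above (in `tests/test_visibility.py`, `tests/unittest.py` and `tests/utils.py`) switch from unpacking `(event, context)` to a two-step persist. A minimal sketch of that pattern, assuming a homeserver and an event builder are already set up as in those tests:

```python
from synapse.events import EventBase
from synapse.events.builder import EventBuilder
from synapse.server import HomeServer


async def create_and_persist(hs: HomeServer, builder: EventBuilder) -> EventBase:
    # create_new_client_event now returns an unpersisted context, which has to
    # be resolved against the event before the event can be persisted.
    handler = hs.get_event_creation_handler()
    event, unpersisted_context = await handler.create_new_client_event(builder)
    context = await unpersisted_context.persist(event)

    persistence = hs.get_storage_controllers().persistence
    assert persistence is not None  # as in the updated test_visibility.py setUp
    await persistence.persist_event(event, context)
    return event
```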
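The new `mark_event_as_partial_state` helper in `tests/test_utils/event_injection.py` is easiest to understand from a call site; the test case below is a hedged sketch and the event and room IDs are placeholders for events a real test would have created and persisted first.

```python
from tests import unittest
from tests.test_utils import event_injection


class PartialStateExampleTestCase(unittest.HomeserverTestCase):
    def test_behaviour_with_partial_state(self) -> None:
        # Falsely mark an already-persisted event as having partial state,
        # then assert that the behaviour under test is not blocked by it.
        self.get_success(
            event_injection.mark_event_as_partial_state(
                self.hs, event_id="$event:test", room_id="!room:test"
            )
        )
```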